Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/bonding/bond_sysfs.c1
-rw-r--r--drivers/net/can/c_can/c_can.c6
-rw-r--r--drivers/net/can/usb/ems_usb.c4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c47
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c71
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c79
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c956
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h29
-rw-r--r--drivers/net/ethernet/cadence/macb.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c13
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec.c7
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c62
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c152
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h135
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c17
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h65
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c140
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c558
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c336
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c29
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c63
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h95
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c16
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h58
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h38
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h207
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h347
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c127
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h268
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h74
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c13
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h72
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c22
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c43
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h242
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h252
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c488
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h21
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c228
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c52
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c47
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c268
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c195
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c780
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c193
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c358
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h20
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c16
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c54
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c34
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c35
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c22
-rw-r--r--drivers/net/ethernet/realtek/r8169.c86
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw.c504
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c107
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h24
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c27
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h11
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c6
-rw-r--r--drivers/net/ethernet/via/via-rhine.c8
-rw-r--r--drivers/net/hamradio/bpqether.c5
-rw-r--r--drivers/net/hamradio/scc.c4
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/ieee802154/at86rf230.c12
-rw-r--r--drivers/net/macvlan.c37
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c3
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/team/team.c8
-rw-r--r--drivers/net/tun.c51
-rw-r--r--drivers/net/usb/cdc_ncm.c14
-rw-r--r--drivers/net/usb/pegasus.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c14
-rw-r--r--drivers/net/usb/usbnet.c35
-rw-r--r--drivers/net/veth.c11
-rw-r--r--drivers/net/virtio_net.c13
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c7
-rw-r--r--drivers/net/vxlan.c3
-rw-r--r--drivers/net/wimax/i2400m/rx.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c117
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c26
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c36
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c30
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h18
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c113
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c193
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c82
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c55
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c63
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c31
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c109
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h22
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c119
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h57
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h19
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c311
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c30
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h66
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c2277
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.h183
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c18
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c1369
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h113
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c40
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c105
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h3
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c90
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c95
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c3
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h3
-rw-r--r--drivers/net/wireless/iwlegacy/common.c32
-rw-r--r--drivers/net/wireless/iwlegacy/common.h1
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h22
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c171
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c42
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c19
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c42
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c35
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c346
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h80
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c514
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h82
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h81
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/binding.c197
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c955
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c378
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h282
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h369
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h140
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h312
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h561
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h380
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h580
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h952
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c640
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c134
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c955
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c1316
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h500
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c311
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c682
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c292
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c207
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c197
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c3080
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h393
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c355
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c437
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c1235
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h372
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c511
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h214
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c916
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c472
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/7000.c111
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h6
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c10
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c43
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c106
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c10
-rw-r--r--drivers/net/wireless/libertas/cfg.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c1
-rw-r--r--drivers/net/wireless/mwifiex/11n.c39
-rw-r--r--drivers/net/wireless/mwifiex/11n.h2
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/README1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/mwifiex/init.c1
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h1
-rw-r--r--drivers/net/wireless/mwifiex/join.c6
-rw-r--r--drivers/net/wireless/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c771
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h215
-rw-r--r--drivers/net/wireless/mwifiex/scan.c11
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c10
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c5
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c11
-rw-r--r--drivers/net/wireless/mwifiex/usb.c4
-rw-r--r--drivers/net/wireless/mwifiex/util.c2
-rw-r--r--drivers/net/wireless/mwl8k.c207
-rw-r--r--drivers/net/wireless/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c41
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c31
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig50
-rw-r--r--drivers/net/wireless/rtlwifi/base.c14
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c13
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h3
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/Kconfig9
-rw-r--r--drivers/net/wireless/ti/Makefile4
-rw-r--r--drivers/net/wireless/ti/wilink_platform_data.c (renamed from drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/Kconfig2
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c6
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c24
-rw-r--r--drivers/net/wireless/ti/wl12xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c37
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.h20
-rw-r--r--drivers/net/wireless/ti/wl12xx/event.c116
-rw-r--r--drivers/net/wireless/ti/wl12xx/event.h111
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c195
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c501
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.h140
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h40
-rw-r--r--drivers/net/wireless/ti/wl18xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c87
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h55
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.c80
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.h52
-rw-r--r--drivers/net/wireless/ti/wl18xx/conf.h22
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c111
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.h77
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c272
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.c326
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.h127
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c54
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h50
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig5
-rw-r--r--drivers/net/wireless/ti/wlcore/Makefile3
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c77
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c423
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h81
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h110
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c326
-rw-r--r--drivers/net/wireless/ti/wlcore/event.h99
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h41
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c19
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h12
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c1588
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c33
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c696
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h144
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c38
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c45
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c298
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h35
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h119
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h54
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/interface.c26
-rw-r--r--drivers/net/xen-netback/netback.c123
409 files changed, 36237 insertions, 7739 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 22399374b1e..94c1534dd57 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1249,7 +1249,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
return;
slave->np = NULL;
- __netpoll_free_rcu(np);
+ __netpoll_free_async(np);
}
static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
{
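
The rename from __netpoll_free_rcu() to __netpoll_free_async() tracks a netpoll API change; the caller's pattern stays the same: detach the pointer first, then hand the object to a deferred-teardown helper instead of tearing it down inline. The sketch below shows that generic detach-then-defer idiom with a plain workqueue and invented names; it is not how netpoll itself implements the deferral.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {                       /* illustrative only */
        struct work_struct cleanup_work;
        /* ...resources that are slow or unsafe to release in the caller's context... */
};

static void demo_cleanup_fn(struct work_struct *work)
{
        struct demo_obj *obj = container_of(work, struct demo_obj, cleanup_work);

        /* Heavyweight teardown runs later, in process context. */
        kfree(obj);
}

/* Caller has already cleared its own pointer, as slave->np = NULL above. */
static void demo_free_async(struct demo_obj *obj)
{
        INIT_WORK(&obj->cleanup_work, demo_cleanup_fn);
        schedule_work(&obj->cleanup_work);
}
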
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1877ed7ca08..1c9e09fbdff 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
pr_info("%s: Setting primary slave to None.\n",
bond->dev->name);
bond->primary_slave = NULL;
+ memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
goto out;
}
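
The added memset() matters because bonding remembers the requested primary interface by name as well as by the primary_slave pointer, and that cached name is consulted again later; leaving it in place could quietly re-promote the same interface. A minimal illustration of the idea, with invented types rather than the driver's own:

#include <linux/string.h>

#define DEMO_IFNAMSIZ 16

struct demo_bond_params {
        char primary[DEMO_IFNAMSIZ];    /* cached name of the requested primary slave */
};

/* If "primary" were not wiped along with the primary_slave pointer, a later
 * comparison like this could still match the demoted slave's name. */
static bool demo_stale_name_matches(const struct demo_bond_params *params,
                                    const char *slave_name)
{
        return params->primary[0] &&
               !strncmp(params->primary, slave_name, DEMO_IFNAMSIZ);
}
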
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 285f763e3cd..a668cd491cb 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -491,8 +491,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
+
+ /* According to C_CAN documentation, the reserved bit
+ * in the IFx_MASK2 register is fixed to 1
+ */
priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
- IFX_WRITE_HIGH_16BIT(mask));
+ IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));
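
The fix above keeps the reserved bit of the IFx_MASK2 register set when the acceptance mask is programmed, as the hardware documentation requires. A small sketch of how the 32-bit mask value is split across the two 16-bit mask registers (macro behaviour approximated from its use here; helper name invented):

#include <linux/bitops.h>
#include <linux/types.h>

/* Approximation of the IFX_WRITE_*_16BIT() helpers as used above. */
static void demo_split_c_can_mask(u32 mask, u16 *mask1, u16 *mask2)
{
        *mask1 = mask & 0xFFFF;                         /* low half  -> IFx_MASK1 */
        /* High half -> IFx_MASK2; bit 13 is reserved and must stay set. */
        *mask2 = ((mask >> 16) & 0xFFFF) | BIT(13);
}
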
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 0e7bde73c3d..5f9a7ad9b96 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1019,10 +1019,8 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
sizeof(struct ems_cpc_msg), GFP_KERNEL);
- if (!dev->tx_msg_buffer) {
- dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
+ if (!dev->tx_msg_buffer)
goto cleanup_intr_in_buffer;
- }
usb_set_intfdata(intf, dev);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 74cfc017adc..797f847edf1 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -494,19 +494,15 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
}
memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
- new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
- GFP_ATOMIC);
- if (!new_dma_addr_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!new_dma_addr_list)
goto free_new_tx_ring;
- }
- new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
- GFP_ATOMIC);
- if (!new_skb_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list)
goto free_new_lists;
- }
kfree(lp->tx_skbuff);
kfree(lp->tx_dma_addr);
@@ -564,19 +560,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
}
memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
- new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
- GFP_ATOMIC);
- if (!new_dma_addr_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+ if (!new_dma_addr_list)
goto free_new_rx_ring;
- }
- new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
- GFP_ATOMIC);
- if (!new_skb_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list)
goto free_new_lists;
- }
/* first copy the current receive buffers */
overlap = min(size, lp->rx_ring_size);
@@ -1933,31 +1924,23 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
- if (!lp->tx_dma_addr) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->tx_dma_addr)
return -ENOMEM;
- }
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
- if (!lp->rx_dma_addr) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->rx_dma_addr)
return -ENOMEM;
- }
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
- if (!lp->tx_skbuff) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->tx_skbuff)
return -ENOMEM;
- }
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
- if (!lp->rx_skbuff) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->rx_skbuff)
return -ENOMEM;
- }
return 0;
}
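
All of the hunks above drop the netif_err() on allocation failure for the same reason: kcalloc()/kzalloc() already log a warning (with a backtrace) when they fail unless __GFP_NOWARN is passed, so the caller only has to unwind. A hedged sketch of the resulting shape, with invented names:

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_ring {                      /* illustrative bookkeeping only */
        dma_addr_t *dma_addr;
        struct sk_buff **skbs;
};

static int demo_alloc_ring_bookkeeping(struct demo_ring *r, unsigned int entries)
{
        r->dma_addr = kcalloc(entries, sizeof(*r->dma_addr), GFP_ATOMIC);
        if (!r->dma_addr)               /* the allocator already warned; just unwind */
                return -ENOMEM;

        r->skbs = kcalloc(entries, sizeof(*r->skbs), GFP_ATOMIC);
        if (!r->skbs) {
                kfree(r->dma_addr);
                r->dma_addr = NULL;
                return -ENOMEM;
        }
        return 0;
}
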
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 571b5145c07..8f333151743 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -21,7 +21,7 @@
#include "atl1c.h"
-#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
+#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"
char atl1c_driver_name[] = "atl1c";
char atl1c_driver_version[] = ATL1C_DRV_VERSION;
@@ -1649,6 +1649,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
u16 num_alloc = 0;
u16 rfd_next_to_use, next_next;
struct atl1c_rx_free_desc *rfd_desc;
+ dma_addr_t mapping;
next_next = rfd_next_to_use = rfd_ring->next_to_use;
if (++next_next == rfd_ring->count)
@@ -1675,9 +1676,18 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
- buffer_info->dma = pci_map_single(pdev, vir_addr,
+ mapping = pci_map_single(pdev, vir_addr,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->length = 0;
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+ netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
+ break;
+ }
+ buffer_info->dma = mapping;
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_FROMDEVICE);
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -2012,7 +2022,29 @@ check_sum:
return 0;
}
-static void atl1c_tx_map(struct atl1c_adapter *adapter,
+static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
+ struct atl1c_tpd_desc *first_tpd,
+ enum atl1c_trans_queue type)
+{
+ struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type];
+ struct atl1c_buffer *buffer_info;
+ struct atl1c_tpd_desc *tpd;
+ u16 first_index, index;
+
+ first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc;
+ index = first_index;
+ while (index != tpd_ring->next_to_use) {
+ tpd = ATL1C_TPD_DESC(tpd_ring, index);
+ buffer_info = &tpd_ring->buffer_info[index];
+ atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+ memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
+ if (++index == tpd_ring->count)
+ index = 0;
+ }
+ tpd_ring->next_to_use = first_index;
+}
+
+static int atl1c_tx_map(struct atl1c_adapter *adapter,
struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
enum atl1c_trans_queue type)
{
@@ -2037,7 +2069,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info->length = map_len;
buffer_info->dma = pci_map_single(adapter->pdev,
skb->data, hdr_len, PCI_DMA_TODEVICE);
- ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+ if (unlikely(pci_dma_mapping_error(adapter->pdev,
+ buffer_info->dma)))
+ goto err_dma;
+
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
mapped_len += map_len;
@@ -2059,6 +2094,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info->dma =
pci_map_single(adapter->pdev, skb->data + mapped_len,
buffer_info->length, PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(adapter->pdev,
+ buffer_info->dma)))
+ goto err_dma;
+
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
@@ -2080,6 +2119,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
frag, 0,
buffer_info->length,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto err_dma;
+
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
ATL1C_PCIMAP_TODEVICE);
@@ -2092,6 +2134,13 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
/* The last buffer info contain the skb address,
so it will be free after unmap */
buffer_info->skb = skb;
+
+ return 0;
+
+err_dma:
+ buffer_info->dma = 0;
+ buffer_info->length = 0;
+ return -1;
}
static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
@@ -2154,10 +2203,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
if (skb_network_offset(skb) != ETH_HLEN)
tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
- atl1c_tx_map(adapter, skb, tpd, type);
- atl1c_tx_queue(adapter, skb, tpd, type);
+ if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
+ netif_info(adapter, tx_done, adapter->netdev,
+ "tx-skb droppted due to dma error\n");
+ /* roll back tpd/buffer */
+ atl1c_tx_rollback(adapter, tpd, type);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ dev_kfree_skb(skb);
+ } else {
+ atl1c_tx_queue(adapter, skb, tpd, type);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ }
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
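
The atl1c changes above add the now-standard guard around streaming DMA mappings: each pci_map_single() and fragment mapping result is checked with a mapping-error helper, and on failure the partially built TX descriptors are rolled back before the skb is dropped. A minimal, self-contained sketch of the check itself, written against the generic dma_* API with an invented bookkeeping struct:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct demo_tx_buf {                    /* per-descriptor bookkeeping, illustrative */
        dma_addr_t dma;
        unsigned int len;
};

static int demo_map_skb_head(struct device *dev, struct sk_buff *skb,
                             struct demo_tx_buf *buf)
{
        buf->len = skb_headlen(skb);
        buf->dma = dma_map_single(dev, skb->data, buf->len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, buf->dma))) {
                buf->dma = 0;
                buf->len = 0;
                return -ENOMEM;         /* caller rolls back and frees the skb */
        }
        return 0;
}
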
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 9bd33db7fdd..3fd32880e52 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -301,7 +301,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
ring->start);
} else {
- new_skb = netdev_alloc_skb(bgmac->net_dev, len);
+ new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
if (new_skb) {
skb_put(new_skb, len);
skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
@@ -535,7 +535,7 @@ static void bgmac_dma_init(struct bgmac *bgmac)
* PHY ops
**************************************************/
-u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
+static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
struct bcma_device *core;
u16 phy_access_addr;
@@ -584,7 +584,7 @@ u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
-void bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
+static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
struct bcma_device *core;
u16 phy_access_addr;
@@ -617,9 +617,13 @@ void bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
tmp |= value;
bcma_write32(core, phy_access_addr, tmp);
- if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000))
+ if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
phyaddr, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
@@ -761,6 +765,26 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
udelay(2);
}
+static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
+{
+ u32 tmp;
+
+ tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+ bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+ tmp = (addr[4] << 8) | addr[5];
+ bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+}
+
+static void bgmac_set_rx_mode(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ if (net_dev->flags & IFF_PROMISC)
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+ else
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+}
+
#if 0 /* We don't use that regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
@@ -889,8 +913,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
sw_type = et_swtype;
} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
- } else if (0) {
- /* TODO */
+ } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
+ sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
+ BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
}
bcma_chipco_chipctl_maskset(cc, 1,
~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
@@ -948,6 +974,7 @@ static void bgmac_chip_intrs_on(struct bgmac *bgmac)
static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
bgmac_write(bgmac, BGMAC_INT_MASK, 0);
+ bgmac_read(bgmac, BGMAC_INT_MASK);
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
@@ -1004,8 +1031,6 @@ static void bgmac_enable(struct bgmac *bgmac)
static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
{
struct bgmac_dma_ring *ring;
- u8 *mac = bgmac->net_dev->dev_addr;
- u32 tmp;
int i;
/* 1 interrupt per received frame */
@@ -1014,21 +1039,14 @@ static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
/* Enable 802.3x tx flow control (honor received PAUSE frames) */
bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
- if (bgmac->net_dev->flags & IFF_PROMISC)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, false);
- else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, false);
+ bgmac_set_rx_mode(bgmac->net_dev);
- /* Set MAC addr */
- tmp = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
- bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
- tmp = (mac[4] << 8) | mac[5];
- bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+ bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
if (bgmac->loopback)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, true);
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, true);
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
@@ -1160,6 +1178,19 @@ static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
return bgmac_dma_tx_add(bgmac, ring, skb);
}
+static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(net_dev, addr);
+ if (ret < 0)
+ return ret;
+ bgmac_write_mac_address(bgmac, (u8 *)addr);
+ eth_commit_mac_addr_change(net_dev, addr);
+ return 0;
+}
+
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct bgmac *bgmac = netdev_priv(net_dev);
@@ -1190,7 +1221,9 @@ static const struct net_device_ops bgmac_netdev_ops = {
.ndo_open = bgmac_open,
.ndo_stop = bgmac_stop,
.ndo_start_xmit = bgmac_start_xmit,
- .ndo_set_mac_address = eth_mac_addr, /* generic, sets dev_addr */
+ .ndo_set_rx_mode = bgmac_set_rx_mode,
+ .ndo_set_mac_address = bgmac_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = bgmac_ioctl,
};
@@ -1290,6 +1323,12 @@ static int bgmac_probe(struct bcma_device *core)
return -ENOTSUPP;
}
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
+ eth_random_addr(mac);
+ dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
+ }
+
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)
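
Among the smaller bgmac changes, the read-back added to bgmac_chip_intrs_off() is worth calling out: MMIO writes can be posted, so reading the same register back forces the mask-all write to reach the device before the caller continues. A generic sketch of that idiom, using a stand-in register pointer rather than the driver's accessors:

#include <linux/io.h>

static inline void demo_mask_all_irqs(void __iomem *int_mask_reg)
{
        writel(0, int_mask_reg);        /* mask every interrupt source */
        readl(int_mask_reg);            /* read back to flush the posted write */
}
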
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 12994701704..4ede614c81f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -339,7 +339,7 @@
#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYMII 0x00000040
#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII 0x00000080
-#define BGMAC_CHIPCTL_1_SW_TYPE_RGMI 0x000000C0
+#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
#define BGMAC_SPEED_10 0x0001
@@ -450,7 +450,4 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
bgmac_maskset(bgmac, offset, ~0, set);
}
-u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg);
-void bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value);
-
#endif /* _BGMAC_H */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 90195e324a6..fdb9b565541 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -44,6 +44,7 @@
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -93,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 129
+#define TG3_MIN_NUM 130
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "January 06, 2013"
+#define DRV_MODULE_RELDATE "February 14, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -263,6 +264,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
TG3_DRV_DATA_FLAG_5705_10_100},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
@@ -330,6 +332,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
@@ -573,7 +576,9 @@ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
tp->write32_mbox(tp, off, val);
- if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
+ (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
+ !tg3_flag(tp, ICH_WORKAROUND)))
tp->read32_mbox(tp, off);
}
@@ -583,7 +588,8 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
writel(val, mbox);
if (tg3_flag(tp, TXD_MBOX_HWBUG))
writel(val, mbox);
- if (tg3_flag(tp, MBOX_WRITE_REORDER))
+ if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
+ tg3_flag(tp, FLUSH_POSTED_WRITES))
readl(mbox);
}
@@ -612,7 +618,7 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
unsigned long flags;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
(off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
return;
@@ -637,7 +643,7 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
unsigned long flags;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
(off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
*val = 0;
return;
@@ -665,7 +671,7 @@ static void tg3_ape_lock_init(struct tg3 *tp)
int i;
u32 regbase, bit;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
regbase = TG3_APE_LOCK_GRANT;
else
regbase = TG3_APE_PER_LOCK_GRANT;
@@ -701,7 +707,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
switch (locknum) {
case TG3_APE_LOCK_GPIO:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
@@ -720,7 +726,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return -EINVAL;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5761) {
req = TG3_APE_LOCK_REQ;
gnt = TG3_APE_LOCK_GRANT;
} else {
@@ -758,7 +764,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
switch (locknum) {
case TG3_APE_LOCK_GPIO:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
@@ -777,7 +783,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
gnt = TG3_APE_LOCK_GRANT;
else
gnt = TG3_APE_PER_LOCK_GRANT;
@@ -1091,7 +1097,8 @@ static void tg3_switch_clocks(struct tg3 *tp)
#define PHY_BUSY_LOOPS 5000
-static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
+ u32 *val)
{
u32 frame_val;
unsigned int loops;
@@ -1107,7 +1114,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
*val = 0x0;
- frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
@@ -1144,7 +1151,13 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
return ret;
}
-static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+{
+ return __tg3_readphy(tp, tp->phy_addr, reg, val);
+}
+
+static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
+ u32 val)
{
u32 frame_val;
unsigned int loops;
@@ -1162,7 +1175,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
tg3_ape_lock(tp, tp->phy_ape_lock);
- frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
@@ -1197,6 +1210,11 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
return ret;
}
+static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+{
+ return __tg3_writephy(tp, tp->phy_addr, reg, val);
+}
+
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
int err;
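
The hunks above split the MDIO accessors into __tg3_readphy()/__tg3_writephy(), which take the PHY address explicitly, while tg3_readphy()/tg3_writephy() remain as thin wrappers passing tp->phy_addr. As a purely hypothetical usage sketch (not part of this patch), the explicit-address variants would allow a caller to address PHYs other than the default one, e.g. scanning the bus:

#include <linux/mii.h>
#include <linux/netdevice.h>

/* Hypothetical helper: probe every MDIO address with the explicit accessor. */
static void demo_scan_mdio_bus(struct tg3 *tp)
{
        unsigned int addr;
        u32 id;

        for (addr = 0; addr < 32; addr++) {
                if (__tg3_readphy(tp, addr, MII_PHYSID1, &id))
                        continue;       /* no PHY answered at this address */
                netdev_info(tp->dev, "PHY id 0x%04x at MDIO address %u\n",
                            id, addr);
        }
}
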
@@ -1461,7 +1479,7 @@ static void tg3_mdio_start(struct tg3 *tp)
udelay(80);
if (tg3_flag(tp, MDIOBUS_INITED) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
}
@@ -1476,7 +1494,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tp->phy_addr = tp->pci_fn + 1;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
else
is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
@@ -1564,7 +1582,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tg3_flag_set(tp, MDIOBUS_INITED);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
return 0;
@@ -1781,7 +1799,12 @@ static int tg3_poll_fw(struct tg3 *tp)
int i;
u32 val;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /* We don't use firmware. */
+ return 0;
+ }
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
/* Wait up to 20ms for init done. */
for (i = 0; i < 200; i++) {
if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
@@ -1810,7 +1833,7 @@ static int tg3_poll_fw(struct tg3 *tp)
netdev_info(tp->dev, "No firmware running\n");
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
/* The 57765 A0 needs a little more
* time to do some important work.
*/
@@ -1940,7 +1963,7 @@ static void tg3_adjust_link(struct net_device *dev)
if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
mac_mode |= MAC_MODE_PORT_MODE_MII;
else if (phydev->speed == SPEED_1000 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
+ tg3_asic_rev(tp) != ASIC_REV_5785)
mac_mode |= MAC_MODE_PORT_MODE_GMII;
else
mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -1967,7 +1990,7 @@ static void tg3_adjust_link(struct net_device *dev)
udelay(40);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5785) {
if (phydev->speed == SPEED_10)
tw32(MAC_MI_STAT,
MAC_MI_STAT_10MBPS_MODE |
@@ -2159,7 +2182,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
MII_TG3_MISC_SHDW_SCR5_DLPTLM |
MII_TG3_MISC_SHDW_SCR5_SDTL |
MII_TG3_MISC_SHDW_SCR5_C125OE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
+ if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
@@ -2314,8 +2337,8 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
u32 val;
if (tp->link_config.active_speed == SPEED_1000 &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_flag(tp, 57765_CLASS)) &&
!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
@@ -2519,7 +2542,7 @@ static int tg3_phy_reset(struct tg3 *tp)
u32 val, cpmuctrl;
int err;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
udelay(40);
@@ -2534,9 +2557,9 @@ static int tg3_phy_reset(struct tg3 *tp)
tg3_link_report(tp);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
err = tg3_phy_reset_5703_4_5(tp);
if (err)
return err;
@@ -2544,8 +2567,8 @@ static int tg3_phy_reset(struct tg3 *tp)
}
cpmuctrl = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) {
cpmuctrl = tr32(TG3_CPMU_CTRL);
if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
tw32(TG3_CPMU_CTRL,
@@ -2563,8 +2586,8 @@ static int tg3_phy_reset(struct tg3 *tp)
tw32(TG3_CPMU_CTRL, cpmuctrl);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
CPMU_LSPD_1000MB_MACCLK_12_5) {
@@ -2642,12 +2665,12 @@ out:
val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
/* adjust output voltage */
tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
tg3_phydsp_write(tp, 0xffb, 0x4000);
tg3_phy_toggle_automdix(tp, 1);
@@ -2675,8 +2698,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
u32 status, shift;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
else
status = tr32(TG3_CPMU_DRV_STATUS);
@@ -2685,8 +2708,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
status &= ~(TG3_GPIO_MSG_MASK << shift);
status |= (newstat << shift);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
else
tw32(TG3_CPMU_DRV_STATUS, status);
@@ -2699,9 +2722,9 @@ static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
if (!tg3_flag(tp, IS_NIC))
return 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
return -EIO;
@@ -2724,8 +2747,8 @@ static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
u32 grc_local_ctrl;
if (!tg3_flag(tp, IS_NIC) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
+ tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)
return;
grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
@@ -2748,8 +2771,8 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
if (!tg3_flag(tp, IS_NIC))
return;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
(GRC_LCLCTRL_GPIO_OE0 |
GRC_LCLCTRL_GPIO_OE1 |
@@ -2781,7 +2804,7 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
u32 grc_local_ctrl = 0;
/* Workaround to prevent overdrawing Amps. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
grc_local_ctrl,
@@ -2853,9 +2876,9 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
return;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_frob_aux_power_5717(tp, include_wol ?
tg3_flag(tp, WOL_ENABLE) != 0 : 0);
return;
@@ -2907,7 +2930,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
u32 val;
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5704) {
u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
u32 serdes_cfg = tr32(MAC_SERDES_CFG);
@@ -2919,7 +2942,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
return;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_bmcr_reset(tp);
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
@@ -2958,16 +2981,16 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
/* The PHY should not be powered down on some chips because
* of bugs.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5780 &&
(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tg3_asic_rev(tp) == ASIC_REV_5717 &&
!tp->pci_fn))
return;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
val |= CPMU_LSPD_1000MB_MACCLK_12_5;
@@ -3350,7 +3373,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
!tg3_flag(tp, 57765_PLUS))
tw32(NVRAM_ADDR, phy_addr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
!tg3_flag(tp, 5755_PLUS) &&
(tp->nvram_jedecnum == JEDEC_ST) &&
(nvram_cmd & NVRAM_CMD_FIRST)) {
@@ -3435,7 +3458,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val = tr32(GRC_VCPU_EXT_CTRL);
tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
@@ -3453,6 +3476,13 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
udelay(10);
} else {
+ /*
+ * There is only an Rx CPU for the 5750 derivative in the
+ * BCM4785.
+ */
+ if (tg3_flag(tp, IS_SSB_CORE))
+ return 0;
+
for (i = 0; i < 10000; i++) {
tw32(offset + CPU_STATE, 0xffffffff);
tw32(offset + CPU_MODE, CPU_MODE_HALT);
@@ -3606,7 +3636,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
info.fw_len = tp->fw->size - 12;
info.fw_data = &fw_data[3];
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705) {
cpu_base = RX_CPU_BASE;
cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
} else {
@@ -3664,8 +3694,8 @@ static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704) {
for (i = 0; i < 12; i++) {
tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
@@ -3784,7 +3814,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_setup_phy(tp, 0);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val;
val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3826,8 +3856,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
mac_mode = MAC_MODE_PORT_MODE_MII;
mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
SPEED_100 : SPEED_10;
if (tg3_5700_link_polarity(tp, speed))
@@ -3860,8 +3889,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
}
if (!tg3_flag(tp, WOL_SPEED_100MB) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)) {
u32 base_val;
base_val = tp->pci_clock_ctrl;
@@ -3872,13 +3901,13 @@ static int tg3_power_down_prepare(struct tg3 *tp)
CLOCK_CTRL_PWRDOWN_PLL133, 40);
} else if (tg3_flag(tp, 5780_CLASS) ||
tg3_flag(tp, CPMU_PRESENT) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_asic_rev(tp) == ASIC_REV_5906) {
/* do nothing */
} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
u32 newbits1, newbits2;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
CLOCK_CTRL_TXCLK_DISABLE |
CLOCK_CTRL_ALTCLK);
@@ -3900,8 +3929,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
if (!tg3_flag(tp, 5705_PLUS)) {
u32 newbits3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
CLOCK_CTRL_TXCLK_DISABLE |
CLOCK_CTRL_44MHZ_CORE);
@@ -3920,8 +3949,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_frob_aux_power(tp, true);
/* Workaround for unstable PLL clock */
- if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
- (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
+ if ((!tg3_flag(tp, IS_SSB_CORE)) &&
+ ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
+ (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
u32 val = tr32(0x7d00);
val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
@@ -4012,8 +4042,8 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
err = tg3_writephy(tp, MII_CTRL1000, new_adv);
@@ -4042,7 +4072,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (err)
val = 0;
- switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ switch (tg3_asic_rev(tp)) {
case ASIC_REV_5717:
case ASIC_REV_57765:
case ASIC_REV_57766:
@@ -4190,8 +4220,8 @@ static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
return false;
if (tgtadv &&
- (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
@@ -4275,9 +4305,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
/* Some third-party PHYs need to be reset on link going
* down.
*/
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+ if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5705) &&
tp->link_up) {
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -4319,8 +4349,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
return err;
}
}
- } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
/* 5701 {A0,B0} CRC bug workaround */
tg3_writephy(tp, 0x15, 0x0a75);
tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
@@ -4337,8 +4367,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
tg3_writephy(tp, MII_TG3_IMASK, ~0);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
@@ -4442,6 +4472,15 @@ relink:
if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
tg3_phy_copper_begin(tp);
+ if (tg3_flag(tp, ROBOSWITCH)) {
+ current_link_up = 1;
+ /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
+ current_speed = SPEED_1000;
+ current_duplex = DUPLEX_FULL;
+ tp->link_config.active_speed = current_speed;
+ tp->link_config.active_duplex = current_duplex;
+ }
+
tg3_readphy(tp, MII_BMSR, &bmsr);
if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
@@ -4460,11 +4499,31 @@ relink:
else
tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ /* In order for the 5750 core in the BCM4785 chip to work properly
+ * in RGMII mode, the LED Control Register must be set up.
+ */
+ if (tg3_flag(tp, RGMII_MODE)) {
+ u32 led_ctrl = tr32(MAC_LED_CTRL);
+ led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
+
+ if (tp->link_config.active_speed == SPEED_10)
+ led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
+ else if (tp->link_config.active_speed == SPEED_100)
+ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_100MBPS_ON);
+ else if (tp->link_config.active_speed == SPEED_1000)
+ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON);
+
+ tw32(MAC_LED_CTRL, led_ctrl);
+ udelay(40);
+ }
+
tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
if (tp->link_config.active_duplex == DUPLEX_HALF)
tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
if (current_link_up == 1 &&
tg3_5700_link_polarity(tp, tp->link_config.active_speed))
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
@@ -4476,7 +4535,7 @@ relink:
* ??? send/receive packets...
*/
if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
- tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
@@ -4495,7 +4554,7 @@ relink:
}
udelay(40);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
current_link_up == 1 &&
tp->link_config.active_speed == SPEED_1000 &&
(tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
@@ -4950,8 +5009,8 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
port_a = 1;
current_link_up = 0;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
workaround = 1;
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
port_a = 0;
@@ -5280,7 +5339,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
bmsr |= BMSR_LSTATUS;
else
@@ -5349,8 +5408,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
bmcr = new_bmcr;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
bmsr |= BMSR_LSTATUS;
else
@@ -5485,7 +5543,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
else
err = tg3_setup_copper_phy(tp, force_reset);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
u32 scale;
val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
@@ -5503,8 +5561,8 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
(6 << TX_LENGTHS_IPG_SHIFT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val |= tr32(MAC_TX_LENGTHS) &
(TX_LENGTHS_JMB_FRM_LEN_MSK |
TX_LENGTHS_CNT_DWN_VAL_MSK);
@@ -7128,7 +7186,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
dma_addr_t new_addr = 0;
int ret = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ if (tg3_asic_rev(tp) != ASIC_REV_5701)
new_skb = skb_copy(skb, GFP_ATOMIC);
else {
int more_headroom = 4 - ((unsigned long)skb->data & 3);
@@ -7302,7 +7360,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else if (tg3_flag(tp, HW_TSO_2))
mss |= hdr_len << 9;
else if (tg3_flag(tp, HW_TSO_1) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
if (tcp_opt_len || iph->ihl > 5) {
int tsflags;
@@ -7458,7 +7516,7 @@ static void tg3_mac_loopback(struct tg3 *tp, bool enable)
if (tg3_flag(tp, 5705_PLUS) ||
(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tg3_asic_rev(tp) == ASIC_REV_5700)
tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
}
@@ -7517,7 +7575,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
udelay(40);
if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ tg3_asic_rev(tp) == ASIC_REV_5785) {
tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
MII_TG3_FET_PTEST_FRC_TX_LINK |
MII_TG3_FET_PTEST_FRC_TX_LOCK);
@@ -7541,7 +7599,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
else
mac_mode |= MAC_MODE_PORT_MODE_MII;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
if (masked_phy_id == TG3_PHY_ID_BCM5401)
@@ -8219,7 +8277,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
/* Set MAX PCI retry to zero. */
val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
tg3_flag(tp, PCIX_MODE))
val |= PCISTATE_RETRY_SAME_DMA;
/* Allow reads and writes to the APE register and memory space. */
@@ -8291,7 +8349,7 @@ static int tg3_chip_reset(struct tg3 *tp)
*/
tg3_save_pci_state(tp);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
tg3_flag(tp, 5755_PLUS))
tw32(GRC_FASTBOOT_PC, 0);
@@ -8326,7 +8384,7 @@ static int tg3_chip_reset(struct tg3 *tp)
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
}
@@ -8336,19 +8394,19 @@ static int tg3_chip_reset(struct tg3 *tp)
if (tg3_flag(tp, PCI_EXPRESS)) {
/* Force PCIe 1.0a mode */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS) &&
tr32(TG3_PCIE_PHY_TSTCTL) ==
(TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
tw32(GRC_MISC_CFG, (1 << 29));
val |= (1 << 29);
}
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
tw32(GRC_VCPU_EXT_CTRL,
tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
@@ -8391,7 +8449,7 @@ static int tg3_chip_reset(struct tg3 *tp)
if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
u16 val16;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
int j;
u32 cfg_val;
@@ -8432,23 +8490,33 @@ static int tg3_chip_reset(struct tg3 *tp)
val = tr32(MEMARB_MODE);
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
tg3_stop_fw(tp);
tw32(0x5000, 0x400);
}
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /*
+ * BCM4785: In order to avoid repercussions from using a
+ * potentially defective internal ROM, stop the Rx RISC CPU,
+ * which is not required for operation.
+ */
+ tg3_stop_fw(tp);
+ tg3_halt_cpu(tp, RX_CPU_BASE);
+ }
+
tw32(GRC_MODE, tp->grc_mode);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
val = tr32(0xc4);
tw32(0xc4, val | (1 << 15));
}
if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
@@ -8474,15 +8542,15 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_mdio_start(tp);
if (tg3_flag(tp, PCI_EXPRESS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS)) {
val = tr32(0x7c00);
tw32(0x7c00, val | (1 << 25));
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5720) {
val = tr32(TG3_CPMU_CLCK_ORIDE);
tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
}
@@ -8694,7 +8762,7 @@ static void tg3_rings_reset(struct tg3 *tp)
else if (tg3_flag(tp, 5717_PLUS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
else if (tg3_flag(tp, 57765_CLASS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ tg3_asic_rev(tp) == ASIC_REV_5762)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
else
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8710,8 +8778,8 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
else if (!tg3_flag(tp, 5705_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
else
@@ -8817,12 +8885,12 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, 5750_PLUS) ||
tg3_flag(tp, 5780_CLASS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
tg3_flag(tp, 57765_PLUS))
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787)
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
else
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
@@ -9004,7 +9072,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
TG3_CPMU_EEE_LNKIDL_UART_IDL;
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
@@ -9017,7 +9085,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_CPMU_EEEMD_LPI_IN_RX |
TG3_CPMU_EEEMD_EEE_ENABLE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ if (tg3_asic_rev(tp) != ASIC_REV_5717)
val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
if (tg3_flag(tp, ENABLE_APE))
@@ -9043,7 +9111,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_write_sig_legacy(tp, RESET_KIND_INIT);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
val = tr32(TG3_CPMU_CTRL);
val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
tw32(TG3_CPMU_CTRL, val);
@@ -9064,7 +9132,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_CPMU_HST_ACC, val);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
PCIE_PWR_MGMT_L1_THRESH_4MS;
@@ -9094,7 +9162,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tg3_flag(tp, 57765_CLASS)) {
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
u32 grc_mode = tr32(GRC_MODE);
/* Access the lower 1K of PL PCIE block registers. */
@@ -9109,8 +9177,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
- u32 grc_mode = tr32(GRC_MODE);
+ if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
+ u32 grc_mode;
+
+ /* Fix transmit hangs */
+ val = tr32(TG3_CPMU_PADRNG_CTL);
+ val |= TG3_CPMU_PADRNG_CTL_RDIV2;
+ tw32(TG3_CPMU_PADRNG_CTL, val);
+
+ grc_mode = tr32(GRC_MODE);
/* Access the lower 1K of DL PCIE block registers. */
val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
@@ -9142,7 +9217,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
tg3_flag(tp, PCIX_MODE)) {
val = tr32(TG3PCI_PCISTATE);
val |= PCISTATE_RETRY_SAME_DMA;
@@ -9160,7 +9235,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3PCI_PCISTATE, val);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
/* Enable some hw fixes. */
val = tr32(TG3PCI_MSI_DATA);
val |= (1 << 26) | (1 << 28) | (1 << 29);
@@ -9179,15 +9254,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, 57765_PLUS)) {
val = tr32(TG3PCI_DMA_RW_CTRL) &
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
if (!tg3_flag(tp, 57765_CLASS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
+ tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5762)
val |= DMA_RWCTRL_TAGGED_STAT_WA;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
+ tg3_asic_rev(tp) != ASIC_REV_5761) {
/* This value is determined during the probe time DMA
* engine test, tg3_test_dma.
*/
@@ -9227,9 +9302,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Initialize MBUF/DESC pool. */
if (tg3_flag(tp, 5750_PLUS)) {
/* Do nothing. */
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ if (tg3_asic_rev(tp) == ASIC_REV_5704)
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
else
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
@@ -9267,11 +9342,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->bufmgr_config.dma_high_water);
val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719)
val |= BUFMGR_MODE_NO_TX_UNDERRUN;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
tw32(BUFMGR_MODE, val);
for (i = 0; i < 2000; i++) {
@@ -9284,7 +9359,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return -ENODEV;
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
tg3_setup_rxbd_thresholds(tp);
@@ -9322,7 +9397,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
(tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
@@ -9336,7 +9411,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val | BDINFO_FLAGS_USE_EXT_RECV);
if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
tg3_flag(tp, 57765_CLASS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -9378,8 +9453,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
(6 << TX_LENGTHS_IPG_SHIFT) |
(32 << TX_LENGTHS_SLOT_TIME_SHIFT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val |= tr32(MAC_TX_LENGTHS) &
(TX_LENGTHS_JMB_FRM_LEN_MSK |
TX_LENGTHS_CNT_DWN_VAL_MSK);
@@ -9399,20 +9474,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717)
rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
if (tg3_flag(tp, TSO_CAPABLE) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -9423,35 +9498,43 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, PCI_EXPRESS))
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ tp->dma_limit = 0;
+ if (tp->dev->mtu <= ETH_DATA_LEN) {
+ rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
+ }
+ }
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3))
rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
if (tg3_flag(tp, 57765_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_PLUS)) {
u32 tgtreg;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
tgtreg = TG3_RDMA_RSRVCTRL_REG2;
else
tgtreg = TG3_RDMA_RSRVCTRL_REG;
val = tr32(tgtreg);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
@@ -9462,12 +9545,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
u32 tgtreg;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
else
tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
@@ -9550,7 +9633,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
if (!tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tg3_asic_rev(tp) != ASIC_REV_5700)
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
udelay(40);
@@ -9568,11 +9651,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ if (tg3_asic_rev(tp) == ASIC_REV_5752)
gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
GRC_LCLCTRL_GPIO_OUTPUT3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ if (tg3_asic_rev(tp) == ASIC_REV_5755)
gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
tp->grc_local_ctrl &= ~gpio_mask;
@@ -9607,11 +9690,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
WDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
if (tg3_flag(tp, TSO_CAPABLE) &&
- (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
/* nothing */
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -9623,7 +9706,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, 5755_PLUS))
val |= WDMAC_MODE_STATUS_TAG_FIX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
val |= WDMAC_MODE_BURST_ALL_DATA;
tw32_f(WDMAC_MODE, val);
@@ -9634,10 +9717,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
&pcix_cmd);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703) {
pcix_cmd &= ~PCI_X_CMD_MAX_READ;
pcix_cmd |= PCI_X_CMD_READ_2K;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
pcix_cmd |= PCI_X_CMD_READ_2K;
}
@@ -9648,7 +9731,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(RDMAC_MODE, rdmac_mode);
udelay(40);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5719) {
for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
break;
@@ -9665,7 +9748,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (!tg3_flag(tp, 5705_PLUS))
tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
tw32(SNDDATAC_MODE,
SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
else
@@ -9688,7 +9771,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(SNDBDI_MODE, val);
tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
err = tg3_load_5701_a0_firmware_fix(tp);
if (err)
return err;
@@ -9703,11 +9786,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->tx_mode = TX_MODE_ENABLE;
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
tp->tx_mode &= ~val;
tp->tx_mode |= tr32(MAC_TX_MODE) & val;
@@ -9758,8 +9841,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(10);
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
- !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
+ if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
+ !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
/* Set drive transmission level to 1.2V */
/* only if the signal pre-emphasis bit is not set */
val = tr32(MAC_SERDES_CFG);
@@ -9767,7 +9850,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val |= 0x880;
tw32(MAC_SERDES_CFG, val);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
tw32(MAC_SERDES_CFG, 0x616000);
}
@@ -9780,14 +9863,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val = 2;
tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
/* Use hardware link auto-negotiation */
tg3_flag_set(tp, HW_AUTONEG);
}
if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ tg3_asic_rev(tp) == ASIC_REV_5714) {
u32 tmp;
tmp = tr32(SERDES_RX_CTRL);
@@ -10041,9 +10124,9 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
+ if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
} else {
u32 val = tr32(HOSTCC_FLOW_ATTN);
@@ -10091,10 +10174,15 @@ static void tg3_timer(unsigned long __opaque)
spin_lock(&tp->lock);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+ /* BCM4785: Flush posted writes from GbE to host memory. */
+ tr32(HOSTCC_MODE);
+ }
+
if (!tg3_flag(tp, TAGGED_STATUS)) {
/* All of this garbage is because when using non-tagged
* IRQ status the mailbox/status_block protocol the chip
@@ -10212,7 +10300,7 @@ restart_timer:
static void tg3_timer_init(struct tg3 *tp)
{
if (tg3_flag(tp, TAGGED_STATUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5717 &&
!tg3_flag(tp, 57765_CLASS))
tp->timer_offset = HZ;
else
@@ -10793,7 +10881,7 @@ static int tg3_open(struct net_device *dev)
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
if (err)
return err;
} else if (err) {
@@ -10863,8 +10951,8 @@ static u64 tg3_calc_crc_errors(struct tg3 *tp)
struct tg3_hw_stats *hw_stats = tp->hw_stats;
if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)) {
u32 val;
if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
@@ -12389,11 +12477,11 @@ static int tg3_test_memory(struct tg3 *tp)
if (tg3_flag(tp, 5717_PLUS))
mem_tbl = mem_tbl_5717;
else if (tg3_flag(tp, 57765_CLASS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ tg3_asic_rev(tp) == ASIC_REV_5762)
mem_tbl = mem_tbl_57765;
else if (tg3_flag(tp, 5755_PLUS))
mem_tbl = mem_tbl_5755;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
else if (tg3_flag(tp, 5705_PLUS))
mem_tbl = mem_tbl_5705;
@@ -12505,7 +12593,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
} else if (tg3_flag(tp, HW_TSO_2))
mss |= hdr_len << 9;
else if (tg3_flag(tp, HW_TSO_1) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
mss |= (TG3_TSO_TCP_OPT_LEN << 9);
} else {
base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
@@ -12691,7 +12779,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
* errata. Also, the MAC loopback test is deprecated for
* all newer ASIC revisions.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
!tg3_flag(tp, CPMU_PRESENT)) {
tg3_mac_loopback(tp, true);
@@ -12969,7 +13057,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
spin_lock_bh(&tp->lock);
- err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
+ err = __tg3_readphy(tp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &mii_regval);
spin_unlock_bh(&tp->lock);
data->val_out = mii_regval;
@@ -12985,7 +13074,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
spin_lock_bh(&tp->lock);
- err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
+ err = __tg3_writephy(tp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
spin_unlock_bh(&tp->lock);
return err;
@@ -13176,7 +13266,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
reset_phy = 1;
err = tg3_restart_hw(tp, reset_phy);
@@ -13289,7 +13379,7 @@ static void tg3_get_nvram_info(struct tg3 *tp)
tw32(NVRAM_CFG1, nvcfg1);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
tg3_flag(tp, 5780_CLASS)) {
switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
@@ -13730,7 +13820,7 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
nvcfg1 = tr32(NVRAM_CFG1);
nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
tg3_flag_set(tp, NO_NVRAM);
return;
@@ -13791,7 +13881,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tp->nvram_size = TG3_NVRAM_SIZE_1MB;
break;
default:
- tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
break;
}
break;
@@ -13837,7 +13928,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tp->nvram_size = TG3_NVRAM_SIZE_1MB;
break;
default:
- tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
break;
}
break;
@@ -13850,7 +13942,7 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
u32 val;
if (tg3_nvram_read(tp, 0, &val))
@@ -13865,6 +13957,14 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
+ tg3_flag_clear(tp, NVRAM);
+ tg3_flag_clear(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
tw32_f(GRC_EEPROM_ADDR,
(EEPROM_ADDR_FSM_RESET |
(EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -13877,8 +13977,8 @@ static void tg3_nvram_init(struct tg3 *tp)
tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
udelay(100);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701) {
tg3_flag_set(tp, NVRAM);
if (tg3_nvram_lock(tp)) {
@@ -13891,26 +13991,26 @@ static void tg3_nvram_init(struct tg3 *tp)
tp->nvram_size = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ if (tg3_asic_rev(tp) == ASIC_REV_5752)
tg3_get_5752_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755)
tg3_get_5755_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_get_5787_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5761)
tg3_get_5761_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_get_5906_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_CLASS))
tg3_get_57780_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
tg3_get_5717_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_get_5720_nvram_info(tp);
else
tg3_get_nvram_info(tp);
@@ -14023,7 +14123,7 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_flag_set(tp, EEPROM_WRITE_PROT);
tg3_flag_set(tp, WOL_CAP);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
tg3_flag_clear(tp, EEPROM_WRITE_PROT);
tg3_flag_set(tp, IS_NIC);
@@ -14050,13 +14150,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
ver >>= NIC_SRAM_DATA_VER_SHIFT;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
+ tg3_asic_rev(tp) != ASIC_REV_5703 &&
(ver > 0) && (ver < 0x100))
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
@@ -14104,18 +14204,16 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
/* Default to PHY_1_MODE if 0 (MAC_MODE) is
* read on some older 5700/5701 bootcode.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5701)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
break;
case SHASTA_EXT_LED_SHARED:
tp->led_ctrl = LED_CTRL_MODE_SHARED;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
break;
@@ -14126,19 +14224,19 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
case SHASTA_EXT_LED_COMBO:
tp->led_ctrl = LED_CTRL_MODE_COMBO;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
break;
}
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
+ if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) &&
tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
tp->led_ctrl = LED_CTRL_MODE_PHY_2;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
@@ -14182,13 +14280,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
if ((tg3_flag(tp, 57765_PLUS) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+ (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
(cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
if (tg3_flag(tp, PCI_EXPRESS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS)) {
u32 cfg3;
@@ -14391,10 +14489,19 @@ static int tg3_phy_probe(struct tg3 *tp)
* subsys device table.
*/
p = tg3_lookup_by_subsys(tp);
- if (!p)
+ if (p) {
+ tp->phy_id = p->phy_id;
+ } else if (!tg3_flag(tp, IS_SSB_CORE)) {
+ /* So far the IDs 0xbc050cd0, 0xbc050f80 and
+ * 0xbc050c30 have been seen on devices
+ * connected to a BCM4785, and there are
+ * probably more. For now, just assume that the
+ * PHY is supported when it is connected to an
+ * SSB core.
+ */
return -ENODEV;
+ }
- tp->phy_id = p->phy_id;
if (!tp->phy_id ||
tp->phy_id == TG3_PHY_ID_BCM8002)
tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
@@ -14402,13 +14509,13 @@ static int tg3_phy_probe(struct tg3 *tp)
}
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
- (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
+ (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5717 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
+ (tg3_asic_rev(tp) == ASIC_REV_57765 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
tg3_phy_init_link_config(tp);
@@ -14518,7 +14625,7 @@ out_not_found:
return;
out_no_vpd:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
strcpy(tp->board_part_number, "BCM5717");
@@ -14526,7 +14633,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM5718");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
strcpy(tp->board_part_number, "BCM57780");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
@@ -14537,7 +14644,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57788");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
strcpy(tp->board_part_number, "BCM57761");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
@@ -14552,7 +14659,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57795");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
strcpy(tp->board_part_number, "BCM57762");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
@@ -14563,7 +14670,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57786");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
strcpy(tp->board_part_number, "BCM95906");
} else {
nomatch:
@@ -14804,7 +14911,7 @@ static void tg3_read_otp_ver(struct tg3 *tp)
{
u32 val, val2;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
return;
if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
@@ -14910,7 +15017,7 @@ static struct pci_dev *tg3_find_peer(struct tg3 *tp)
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
+ if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
u32 reg;
/* All devices that use the alternate
@@ -14947,47 +15054,47 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
/* Wrong chip ID in 5752 A0. This code can be removed later
* as A0 is not in production.
*/
- if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
tg3_flag_set(tp, 5717_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
+ tg3_asic_rev(tp) == ASIC_REV_57766)
tg3_flag_set(tp, 57765_CLASS);
if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, 57765_PLUS);
/* Intentionally exclude ASIC_REV_5906 */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, 5755_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
+ tg3_asic_rev(tp) == ASIC_REV_5714)
tg3_flag_set(tp, 5780_CLASS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5906 ||
tg3_flag(tp, 5755_PLUS) ||
tg3_flag(tp, 5780_CLASS))
tg3_flag_set(tp, 5750_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
tg3_flag(tp, 5750_PLUS))
tg3_flag_set(tp, 5705_PLUS);
}
@@ -14997,13 +15104,13 @@ static bool tg3_10_100_only_device(struct tg3 *tp,
{
u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
- (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+ if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
+ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
(tp->phy_flags & TG3_PHYFLG_IS_FET))
return true;
if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705) {
if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
return true;
} else {
@@ -15064,8 +15171,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* enable this workaround if the 5703 is on the secondary
* bus of these ICH bridges.
*/
- if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
- (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
+ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
static struct tg3_dev_id {
u32 vendor;
u32 device;
@@ -15105,7 +15212,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
}
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5701) {
static struct tg3_dev_id {
u32 vendor;
u32 device;
@@ -15165,29 +15272,29 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
} while (bridge);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5714)
tp->pdev_peer = tg3_find_peer(tp);
/* Determine TSO capabilities */
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
; /* Do nothing. HW bug. */
else if (tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, HW_TSO_3);
else if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_flag_set(tp, HW_TSO_2);
else if (tg3_flag(tp, 5750_PLUS)) {
tg3_flag_set(tp, HW_TSO_1);
tg3_flag_set(tp, TSO_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
- tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
+ tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
tg3_flag_clear(tp, TSO_BUG);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
tg3_flag_set(tp, TSO_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+ if (tg3_asic_rev(tp) == ASIC_REV_5705)
tp->fw_needed = FIRMWARE_TG3TSO5;
else
tp->fw_needed = FIRMWARE_TG3TSO;
@@ -15209,22 +15316,22 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->fw_needed = NULL;
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
tp->fw_needed = FIRMWARE_TG3;
tp->irq_max = 1;
if (tg3_flag(tp, 5750_PLUS)) {
tg3_flag_set(tp, SUPPORT_MSI);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
- tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
+ if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5750_BX ||
+ (tg3_asic_rev(tp) == ASIC_REV_5714 &&
+ tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
tp->pdev_peer == tp->pdev))
tg3_flag_clear(tp, SUPPORT_MSI);
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_flag_set(tp, 1SHOT_MSI);
}
@@ -15240,26 +15347,26 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->rxq_max = TG3_RSS_MAX_NUM_QS;
tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
tp->txq_max = tp->irq_max - 1;
}
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_flag_set(tp, SHORT_DMA_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719)
tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
tg3_flag_set(tp, USE_JUMBO_BDFLAG);
if (!tg3_flag(tp, 5705_PLUS) ||
@@ -15277,20 +15384,19 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_flag_clear(tp, HW_TSO_2);
tg3_flag_clear(tp, TSO_CAPABLE);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
tg3_flag_set(tp, CLKREQ_BUG);
- } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
tg3_flag_set(tp, L1PLLPD_EN);
}
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
/* BCM5785 devices are effectively PCIe devices, and should
* follow PCIe codepaths, but do not have a PCIe capabilities
* section.
@@ -15323,7 +15429,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&tp->pci_cacheline_sz);
pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
&tp->pci_lat_timer);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
tp->pci_lat_timer < 64) {
tp->pci_lat_timer = 64;
pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
@@ -15333,7 +15439,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Important! -- It is critical that the PCI-X hw workaround
* situation is decided before the first MMIO register access.
*/
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
/* 5700 BX chips need to have their TX producer index
* mailboxes written twice to workaround a bug.
*/
@@ -15375,7 +15481,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, PCI_32BIT);
/* Chip-specific fixup from Broadcom driver */
- if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
(!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
@@ -15392,9 +15498,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Various workaround register access methods */
if (tg3_flag(tp, PCIX_TARGET_HWBUG))
tp->write32 = tg3_write_indirect_reg32;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
(tg3_flag(tp, PCI_EXPRESS) &&
- tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
/*
* Back to back register writes can cause problems on these
* chips, the workaround is to read back all reg writes
@@ -15426,7 +15532,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
pci_cmd &= ~PCI_COMMAND_MEMORY;
pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tp->read32_mbox = tg3_read32_mbox_5906;
tp->write32_mbox = tg3_write32_mbox_5906;
tp->write32_tx_mbox = tg3_write32_mbox_5906;
@@ -15435,8 +15541,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tp->write32 == tg3_write_indirect_reg32 ||
(tg3_flag(tp, PCIX_MODE) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)))
tg3_flag_set(tp, SRAM_USE_CONFIG);
/* The memory arbiter has to be enabled in order for SRAM accesses
@@ -15448,7 +15554,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
tg3_flag(tp, 5780_CLASS)) {
if (tg3_flag(tp, PCIX_MODE)) {
pci_read_config_dword(tp->pdev,
@@ -15456,20 +15562,25 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&val);
tp->pci_fn = val & 0x7;
}
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
val = tr32(TG3_CPMU_STATUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717)
tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
else
tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
TG3_CPMU_STATUS_FSHFT_5719;
}
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+ tp->write32_tx_mbox = tg3_write_flush_reg32;
+ tp->write32_rx_mbox = tg3_write_flush_reg32;
+ }
+
/* Get eeprom hw config before calling tg3_set_power_state().
* In particular, the TG3_FLAG_IS_NIC flag must be
* determined before calling tg3_set_power_state() so that
@@ -15505,18 +15616,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* It is also used as eeprom write protect on LOMs.
*/
tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
tg3_flag(tp, EEPROM_WRITE_PROT))
tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
GRC_LCLCTRL_GPIO_OUTPUT1);
/* Unused GPIO3 must be driven as output on 5752 because there
* are no pull-up resistors on unused GPIO pins.
*/
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5752)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_CLASS))
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
@@ -15530,7 +15641,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
GRC_LCLCTRL_GPIO_OUTPUT0;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
tp->grc_local_ctrl |=
tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
@@ -15544,42 +15655,42 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, JUMBO_RING_ENABLE);
/* Determine WakeOnLan speed to use. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
tg3_flag_clear(tp, WOL_SPEED_100MB);
} else {
tg3_flag_set(tp, WOL_SPEED_100MB);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ if (tg3_asic_rev(tp) == ASIC_REV_5906)
tp->phy_flags |= TG3_PHYFLG_IS_FET;
/* A few boards don't want Ethernet@WireSpeed phy feature */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
- (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
+ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
(tp->phy_flags & TG3_PHYFLG_IS_FET) ||
(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
+ if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5704_AX)
tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
if (tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
+ tg3_asic_rev(tp) != ASIC_REV_57780 &&
!tg3_flag(tp, 57765_PLUS)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761) {
if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
@@ -15589,8 +15700,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->phy_flags |= TG3_PHYFLG_BER_BUG;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) {
tp->phy_otp = tg3_read_otp_phycfg(tp);
if (tp->phy_otp == 0)
tp->phy_otp = TG3_OTP_DEFAULT;
@@ -15602,20 +15713,20 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->mi_mode = MAC_MI_MODE_BASE;
tp->coalesce_mode = 0;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
+ if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
+ tg3_chip_rev(tp) != CHIPREV_5700_BX)
tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
/* Set these bits to enable statistics workaround. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
tp->coalesce_mode |= HOSTCC_MODE_ATTN;
tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
tg3_flag_set(tp, USE_PHYLIB);
err = tg3_mdio_init(tp);
@@ -15624,8 +15735,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
GRC_MODE_WORD_SWAP_B2HRX_DATA |
GRC_MODE_B2HRX_ENABLE |
@@ -15645,12 +15756,10 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&pci_state_reg);
if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
!tg3_flag(tp, PCIX_TARGET_HWBUG)) {
- u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
-
- if (chiprevid == CHIPREV_ID_5701_A0 ||
- chiprevid == CHIPREV_ID_5701_B0 ||
- chiprevid == CHIPREV_ID_5701_B2 ||
- chiprevid == CHIPREV_ID_5701_B5) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
void __iomem *sram_base;
/* Write some dummy words into the SRAM status block
@@ -15673,13 +15782,13 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
(grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
tg3_flag_set(tp, IS_5788);
if (!tg3_flag(tp, IS_5788) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tg3_asic_rev(tp) != ASIC_REV_5700)
tg3_flag_set(tp, TAGGED_STATUS);
if (tg3_flag(tp, TAGGED_STATUS)) {
tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
@@ -15712,7 +15821,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
} else {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700)
tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
else
tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
@@ -15722,7 +15831,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* change bit implementation, so we must use the
* status register in those cases.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700)
tg3_flag_set(tp, USE_LINKCHG_REG);
else
tg3_flag_clear(tp, USE_LINKCHG_REG);
@@ -15732,7 +15841,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* upon subsystem IDs.
*/
if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ tg3_asic_rev(tp) == ASIC_REV_5701 &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
tg3_flag_set(tp, USE_LINKCHG_REG);
@@ -15746,7 +15855,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
tg3_flag(tp, PCIX_MODE)) {
tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -15763,9 +15872,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Increment the rx prod index on the rx std ring by at most
* 8 for these chips to workaround hw errata.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5755)
tp->rx_std_max_post = 8;
if (tg3_flag(tp, ASPM_WORKAROUND))
@@ -15806,14 +15915,21 @@ static int tg3_get_device_address(struct tg3 *tp)
struct net_device *dev = tp->dev;
u32 hi, lo, mac_offset;
int addr_ok = 0;
+ int err;
#ifdef CONFIG_SPARC
if (!tg3_get_macaddr_sparc(tp))
return 0;
#endif
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
+ if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
+ return 0;
+ }
+
mac_offset = 0x7c;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
tg3_flag(tp, 5780_CLASS)) {
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
mac_offset = 0xcc;
@@ -15826,7 +15942,7 @@ static int tg3_get_device_address(struct tg3 *tp)
mac_offset = 0xcc;
if (tp->pci_fn > 1)
mac_offset += 0x18c;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
mac_offset = 0x10;
/* First try to get it from MAC address mailbox. */
@@ -15894,8 +16010,8 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
/* On 5703 and later chips, the boundary bits have no
* effect.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
!tg3_flag(tp, PCI_EXPRESS))
goto out;
@@ -16133,14 +16249,14 @@ static int tg3_test_dma(struct tg3 *tp)
/* DMA read watermark not used on PCIE */
tp->dma_rwctrl |= 0x00180000;
} else if (!tg3_flag(tp, PCIX_MODE)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
+ tg3_asic_rev(tp) == ASIC_REV_5750)
tp->dma_rwctrl |= 0x003f0000;
else
tp->dma_rwctrl |= 0x003f000f;
} else {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704) {
u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
u32 read_water = 0x7;
@@ -16149,35 +16265,37 @@ static int tg3_test_dma(struct tg3 *tp)
* better performance.
*/
if (tg3_flag(tp, 40BIT_DMA_BUG) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tg3_asic_rev(tp) == ASIC_REV_5704)
tp->dma_rwctrl |= 0x8000;
else if (ccval == 0x6 || ccval == 0x7)
tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
+ if (tg3_asic_rev(tp) == ASIC_REV_5703)
read_water = 4;
/* Set bit 23 to enable PCIX hw bug fix */
tp->dma_rwctrl |=
(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
(1 << 23);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
/* 5780 always in PCIX mode */
tp->dma_rwctrl |= 0x00144000;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
/* 5714 always in PCIX mode */
tp->dma_rwctrl |= 0x00148000;
} else {
tp->dma_rwctrl |= 0x001b000f;
}
}
+ if (tg3_flag(tp, ONE_DMA_AT_ONCE))
+ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704)
tp->dma_rwctrl &= 0xfffffff0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
/* Remove this if it causes problems for some boards. */
tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
@@ -16201,8 +16319,8 @@ static int tg3_test_dma(struct tg3 *tp)
tg3_switch_clocks(tp);
#endif
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701)
goto out;
/* It is best to perform DMA test with maximum write burst size
@@ -16321,7 +16439,7 @@ static void tg3_init_bufmgr_config(struct tg3 *tp)
DEFAULT_MB_MACRX_LOW_WATER_5705;
tp->bufmgr_config.mbuf_high_water =
DEFAULT_MB_HIGH_WATER_5705;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tp->bufmgr_config.mbuf_mac_rx_low_water =
DEFAULT_MB_MACRX_LOW_WATER_5906;
tp->bufmgr_config.mbuf_high_water =
@@ -16516,6 +16634,18 @@ static int tg3_init_one(struct pci_dev *pdev,
else
tp->msg_enable = TG3_DEF_MSG_ENABLE;
+ if (pdev_is_ssb_gige_core(pdev)) {
+ tg3_flag_set(tp, IS_SSB_CORE);
+ if (ssb_gige_must_flush_posted_writes(pdev))
+ tg3_flag_set(tp, FLUSH_POSTED_WRITES);
+ if (ssb_gige_one_dma_at_once(pdev))
+ tg3_flag_set(tp, ONE_DMA_AT_ONCE);
+ if (ssb_gige_have_roboswitch(pdev))
+ tg3_flag_set(tp, ROBOSWITCH);
+ if (ssb_gige_is_rgmii(pdev))
+ tg3_flag_set(tp, RGMII_MODE);
+ }
+
/* The word/byte swap controls here control register access byte
* swapping. DMA data byte swapping is controlled in the GRC_MODE
* setting below.
@@ -16631,7 +16761,7 @@ static int tg3_init_one(struct pci_dev *pdev,
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
- if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
if (tg3_flag(tp, 5755_PLUS))
@@ -16651,11 +16781,11 @@ static int tg3_init_one(struct pci_dev *pdev,
if (features & NETIF_F_IPV6_CSUM)
features |= NETIF_F_TSO6;
if (tg3_flag(tp, HW_TSO_3) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
features |= NETIF_F_TSO_ECN;
}
@@ -16667,14 +16797,14 @@ static int tg3_init_one(struct pci_dev *pdev,
* MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
* loopback for the remaining devices.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
!tg3_flag(tp, CPMU_PRESENT))
/* Add the loopback capability */
features |= NETIF_F_LOOPBACK;
dev->hw_features |= features;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
tg3_flag_set(tp, MAX_RXPEND_64);
@@ -16753,9 +16883,9 @@ static int tg3_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, PTP_CAPABLE);
if (tg3_flag(tp, 5717_PLUS)) {
@@ -16765,6 +16895,8 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_timer_init(tp);
+ tg3_carrier_off(tp);
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting\n");
@@ -16773,7 +16905,7 @@ static int tg3_init_one(struct pci_dev *pdev,
netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
tp->board_part_number,
- tp->pci_chip_rev_id,
+ tg3_chip_rev_id(tp),
tg3_bus_string(tp, str),
dev->dev_addr);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 9cd88a4b9a5..8d7d4c2ab5d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -120,9 +120,7 @@
#define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200
#define MISC_HOST_CTRL_CHIPREV 0xffff0000
#define MISC_HOST_CTRL_CHIPREV_SHIFT 16
-#define GET_CHIP_REV_ID(MISC_HOST_CTRL) \
- (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \
- MISC_HOST_CTRL_CHIPREV_SHIFT)
+
#define CHIPREV_ID_5700_A0 0x7000
#define CHIPREV_ID_5700_A1 0x7001
#define CHIPREV_ID_5700_B0 0x7100
@@ -163,7 +161,7 @@
#define CHIPREV_ID_5719_A0 0x05719000
#define CHIPREV_ID_5720_A0 0x05720000
#define CHIPREV_ID_5762_A0 0x05762000
-#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
+
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
#define ASIC_REV_5703 0x01
@@ -187,7 +185,6 @@
#define ASIC_REV_5720 0x5720
#define ASIC_REV_57766 0x57766
#define ASIC_REV_5762 0x5762
-#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
#define CHIPREV_5700_CX 0x72
@@ -200,7 +197,6 @@
#define CHIPREV_5784_AX 0x57840
#define CHIPREV_5761_AX 0x57610
#define CHIPREV_57765_AX 0x577650
-#define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff)
#define METAL_REV_A0 0x00
#define METAL_REV_A1 0x01
#define METAL_REV_B0 0x00
@@ -1164,6 +1160,8 @@
#define CPMU_MUTEX_GNT_DRIVER 0x00001000
#define TG3_CPMU_PHY_STRAP 0x00003664
#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
+#define TG3_CPMU_PADRNG_CTL 0x00003668
+#define TG3_CPMU_PADRNG_CTL_RDIV2 0x00040000
/* 0x3664 --> 0x36b0 unused */
#define TG3_CPMU_EEE_MODE 0x000036b0
@@ -3056,6 +3054,11 @@ enum TG3_FLAGS {
TG3_FLAG_57765_PLUS,
TG3_FLAG_57765_CLASS,
TG3_FLAG_5717_PLUS,
+ TG3_FLAG_IS_SSB_CORE,
+ TG3_FLAG_FLUSH_POSTED_WRITES,
+ TG3_FLAG_ROBOSWITCH,
+ TG3_FLAG_ONE_DMA_AT_ONCE,
+ TG3_FLAG_RGMII_MODE,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
@@ -3352,4 +3355,18 @@ struct tg3 {
bool link_up;
};
+/* Accessor macros for chip and asic attributes
+ *
+ * nb: Using static inlines equivalent to the accessor macros generates
+ * larger object code with gcc 4.7.
+ * Using statement expression macros to check tp with
+ * typecheck(struct tg3 *, tp) also creates larger objects.
+ */
+#define tg3_chip_rev_id(tp) \
+ ((tp)->pci_chip_rev_id)
+#define tg3_asic_rev(tp) \
+ ((tp)->pci_chip_rev_id >> 12)
+#define tg3_chip_rev(tp) \
+ ((tp)->pci_chip_rev_id >> 8)
+
#endif /* !(_T3_H) */
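A minimal sketch (illustrative only, not part of the patch above) of what the new tg3.h accessors compute, using values already defined in the header: CHIPREV_ID_5700_A0 is 0x7000, so the ASIC revision field is 0x07 (ASIC_REV_5700) and the chip revision field is 0x70 (CHIPREV_5700_AX). Assumes a struct tg3 *tp whose pci_chip_rev_id has already been read from config space.

	/* Illustrative decode of the raw pci_chip_rev_id value */
	u32 id   = tg3_chip_rev_id(tp);	/* full revision word, e.g. 0x7000 */
	u32 asic = tg3_asic_rev(tp);	/* id >> 12 -> 0x07 == ASIC_REV_5700 */
	u32 rev  = tg3_chip_rev(tp);	/* id >> 8  -> 0x70 == CHIPREV_5700_AX */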
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 352190b9ebe..79039439bfd 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -693,6 +693,11 @@ static int macb_poll(struct napi_struct *napi, int budget)
* get notified when new packets arrive.
*/
macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+
+ /* Packets received while interrupts were disabled */
+ status = macb_readl(bp, RSR);
+ if (unlikely(status))
+ napi_reschedule(napi);
}
/* TODO: Handle errors */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 92170d50d9d..9488032d6d2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1477,8 +1477,10 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
- if (pkt->vlan_ex)
+ if (pkt->vlan_ex) {
__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ rxq->stats.vlan_ex++;
+ }
ret = napi_gro_frags(&rxq->rspq.napi);
if (ret == GRO_HELD)
@@ -1501,7 +1503,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct sk_buff *skb;
- const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
+ const struct cpl_rx_pkt *pkt = (void *)rsp;
bool csum_ok = pkt->csum_calc && !pkt->err_vec;
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4010cb71bdd..28ceb841418 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -36,13 +36,13 @@
#define DRV_VER "4.6.62.0u"
#define DRV_NAME "be2net"
-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
+#define BE_NAME "Emulex BladeEngine2"
+#define BE3_NAME "Emulex BladeEngine3"
+#define OC_NAME "Emulex OneConnect"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define OC_NAME_SH OC_NAME "(Skyhawk)"
-#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8b04880ee05..071aea79d21 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -93,13 +93,16 @@ static void be_mcc_notify(struct be_adapter *adapter)
* little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
+ u32 flags;
+
if (compl->flags != 0) {
- compl->flags = le32_to_cpu(compl->flags);
- BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
- return true;
- } else {
- return false;
+ flags = le32_to_cpu(compl->flags);
+ if (flags & CQE_FLAGS_VALID_MASK) {
+ compl->flags = flags;
+ return true;
+ }
}
+ return false;
}
/* Need to reset the entire word that houses the valid bit */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 7d534818d2f..3860888ac71 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -25,7 +25,7 @@
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
static unsigned int num_vfs;
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 51215e1507d..0fe68c46267 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -1609,7 +1609,6 @@ static int fec_enet_init(struct net_device *ndev)
}
spin_lock_init(&fep->hw_lock);
- spin_lock_init(&fep->tmreg_lock);
fep->netdev = ndev;
@@ -1841,6 +1840,9 @@ fec_probe(struct platform_device *pdev)
fec_reset_phy(pdev);
+ if (fep->bufdesc_ex)
+ fec_ptp_init(ndev, pdev);
+
ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1856,9 +1858,6 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_register;
- if (fep->bufdesc_ex)
- fec_ptp_init(ndev, pdev);
-
return 0;
failed_register:
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 817d081d2cd..7f91b0c5c26 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/netdevice.h>
@@ -76,10 +77,6 @@ static void mpc52xx_fec_stop(struct net_device *dev);
static void mpc52xx_fec_start(struct net_device *dev);
static void mpc52xx_fec_reset(struct net_device *dev);
-static u8 mpc52xx_fec_mac_addr[6];
-module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0);
-MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe");
-
#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
static int debug = -1; /* the above default */
@@ -110,15 +107,6 @@ static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
-static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- struct mpc52xx_fec __iomem *fec = priv->fec;
-
- *(u32 *)(&mac[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16;
-}
-
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sock = addr;
@@ -853,6 +841,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
struct resource mem;
const u32 *prop;
int prop_size;
+ struct device_node *np = op->dev.of_node;
+ const char *mac_addr;
phys_addr_t rx_fifo;
phys_addr_t tx_fifo;
@@ -866,7 +856,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
priv->ndev = ndev;
/* Reserve FEC control zone */
- rv = of_address_to_resource(op->dev.of_node, 0, &mem);
+ rv = of_address_to_resource(np, 0, &mem);
if (rv) {
printk(KERN_ERR DRIVER_NAME ": "
"Error while parsing device node resource\n" );
@@ -919,7 +909,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Get the IRQ we need one by one */
/* Control */
- ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+ ndev->irq = irq_of_parse_and_map(np, 0);
/* RX */
priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
@@ -927,11 +917,33 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* TX */
priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
- /* MAC address init */
- if (!is_zero_ether_addr(mpc52xx_fec_mac_addr))
- memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6);
- else
- mpc52xx_fec_get_paddr(ndev, ndev->dev_addr);
+ /*
+ * MAC address init:
+ *
+ * First try to read MAC address from DT
+ */
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr) {
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ } else {
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ /*
+ * If the MAC address is not provided via DT then read
+ * it back from the controller regs
+ */
+ *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ }
+
+ /*
+ * Check if the MAC address is valid, if not get a random one
+ */
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(&ndev->dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
@@ -942,20 +954,20 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
- prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
+ prop = of_get_property(np, "current-speed", &prop_size);
if (prop && (prop_size >= sizeof(u32) * 2)) {
priv->speed = prop[0];
priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
}
/* If there is a phy handle, then get the PHY node */
- priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
- if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
+ if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
@@ -970,6 +982,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* We're done ! */
dev_set_drvdata(&op->dev, ndev);
+ printk(KERN_INFO "%s: %s MAC %pM\n",
+ ndev->name, op->dev.of_node->full_name, ndev->dev_addr);
return 0;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index c40526c78c2..1f17ca0f220 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -104,7 +104,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
unsigned long flags;
int inc;
- inc = 1000000000 / clk_get_rate(fep->clk_ptp);
+ inc = 1000000000 / fep->cycle_speed;
/* grab the ptp lock */
spin_lock_irqsave(&fep->tmreg_lock, flags);
@@ -363,6 +363,8 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
fep->ptp_caps.settime = fec_ptp_settime;
fep->ptp_caps.enable = fec_ptp_enable;
+ fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+
spin_lock_init(&fep->tmreg_lock);
fec_ptp_start_cyclecounter(ndev);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 19c54a0acb5..4b5e8a69248 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -133,8 +133,8 @@ static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
@@ -231,7 +231,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
dma_addr_t addr;
int i, j, k;
struct gfar_private *priv = netdev_priv(ndev);
- struct device *dev = &priv->ofdev->dev;
+ struct device *dev = priv->dev;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
@@ -277,14 +277,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
/* Setup the skbuff rings */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
- tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
- tx_queue->tx_ring_size,
- GFP_KERNEL);
- if (!tx_queue->tx_skbuff) {
- netif_err(priv, ifup, ndev,
- "Could not allocate tx_skbuff\n");
+ tx_queue->tx_skbuff =
+ kmalloc_array(tx_queue->tx_ring_size,
+ sizeof(*tx_queue->tx_skbuff),
+ GFP_KERNEL);
+ if (!tx_queue->tx_skbuff)
goto cleanup;
- }
for (k = 0; k < tx_queue->tx_ring_size; k++)
tx_queue->tx_skbuff[k] = NULL;
@@ -292,15 +290,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
- rx_queue->rx_ring_size,
- GFP_KERNEL);
-
- if (!rx_queue->rx_skbuff) {
- netif_err(priv, ifup, ndev,
- "Could not allocate rx_skbuff\n");
+ rx_queue->rx_skbuff =
+ kmalloc_array(rx_queue->rx_ring_size,
+ sizeof(*rx_queue->rx_skbuff),
+ GFP_KERNEL);
+ if (!rx_queue->rx_skbuff)
goto cleanup;
- }
for (j = 0; j < rx_queue->rx_ring_size; j++)
rx_queue->rx_skbuff[j] = NULL;
@@ -349,6 +344,9 @@ static void gfar_init_mac(struct net_device *ndev)
/* Configure the coalescing support */
gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ /* set this when rx hw offload (TOE) functions are being used */
+ priv->uses_rxfcb = 0;
+
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
/* Program the RIR0 reg with the required distribution */
@@ -359,8 +357,10 @@ static void gfar_init_mac(struct net_device *ndev)
if (ndev->flags & IFF_PROMISC)
rctrl |= RCTRL_PROM;
- if (ndev->features & NETIF_F_RXCSUM)
+ if (ndev->features & NETIF_F_RXCSUM) {
rctrl |= RCTRL_CHECKSUMMING;
+ priv->uses_rxfcb = 1;
+ }
if (priv->extended_hash) {
rctrl |= RCTRL_EXTHASH;
@@ -382,11 +382,15 @@ static void gfar_init_mac(struct net_device *ndev)
}
/* Enable HW time stamping if requested from user space */
- if (priv->hwts_rx_en)
+ if (priv->hwts_rx_en) {
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+ priv->uses_rxfcb = 1;
+ }
- if (ndev->features & NETIF_F_HW_VLAN_RX)
+ if (ndev->features & NETIF_F_HW_VLAN_RX) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+ priv->uses_rxfcb = 1;
+ }
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
@@ -505,20 +509,6 @@ void unlock_tx_qs(struct gfar_private *priv)
spin_unlock(&priv->tx_queue[i]->txlock);
}
-static bool gfar_is_vlan_on(struct gfar_private *priv)
-{
- return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
- (priv->ndev->features & NETIF_F_HW_VLAN_TX);
-}
-
-/* Returns 1 if incoming frames use an FCB */
-static inline int gfar_uses_fcb(struct gfar_private *priv)
-{
- return gfar_is_vlan_on(priv) ||
- (priv->ndev->features & NETIF_F_RXCSUM) ||
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
-}
-
static void free_tx_pointers(struct gfar_private *priv)
{
int i;
@@ -580,19 +570,11 @@ static int gfar_parse_group(struct device_node *np,
u32 *queue_mask;
int i;
- if (priv->mode == MQ_MG_MODE) {
- for (i = 0; i < GFAR_NUM_IRQS; i++) {
- grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
- GFP_KERNEL);
- if (!grp->irqinfo[i])
- return -ENOMEM;
- }
- } else {
- grp->irqinfo[GFAR_TX] = kzalloc(sizeof(struct gfar_irqinfo),
- GFP_KERNEL);
- if (!grp->irqinfo[GFAR_TX])
+ for (i = 0; i < GFAR_NUM_IRQS; i++) {
+ grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
+ GFP_KERNEL);
+ if (!grp->irqinfo[i])
return -ENOMEM;
- grp->irqinfo[GFAR_RX] = grp->irqinfo[GFAR_ER] = NULL;
}
grp->regs = of_iomap(np, 0);
@@ -676,7 +658,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return -ENOMEM;
priv = netdev_priv(dev);
- priv->node = ofdev->dev.of_node;
priv->ndev = dev;
priv->num_tx_queues = num_tx_qs;
@@ -1014,7 +995,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv = netdev_priv(dev);
priv->ndev = dev;
priv->ofdev = ofdev;
- priv->node = ofdev->dev.of_node;
+ priv->dev = &ofdev->dev;
SET_NETDEV_DEV(dev, &ofdev->dev);
spin_lock_init(&priv->bflock);
@@ -1051,8 +1032,6 @@ static int gfar_probe(struct platform_device *ofdev)
/* Set the dev->base_addr to the gfar reg region */
dev->base_addr = (unsigned long) regs;
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
/* Fill in the dev structure */
dev->watchdog_timeo = TX_TIMEOUT;
dev->mtu = 1500;
@@ -1730,13 +1709,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
if (!tx_queue->tx_skbuff[i])
continue;
- dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
+ dma_unmap_single(priv->dev, txbdp->bufPtr,
txbdp->length, DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
j++) {
txbdp++;
- dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
+ dma_unmap_page(priv->dev, txbdp->bufPtr,
txbdp->length, DMA_TO_DEVICE);
}
txbdp++;
@@ -1757,8 +1736,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
for (i = 0; i < rx_queue->rx_ring_size; i++) {
if (rx_queue->rx_skbuff[i]) {
- dma_unmap_single(&priv->ofdev->dev,
- rxbdp->bufPtr, priv->rx_buffer_size,
+ dma_unmap_single(priv->dev, rxbdp->bufPtr,
+ priv->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
rx_queue->rx_skbuff[i] = NULL;
@@ -1797,7 +1776,7 @@ static void free_skb_resources(struct gfar_private *priv)
free_skb_rx_queue(rx_queue);
}
- dma_free_coherent(&priv->ofdev->dev,
+ dma_free_coherent(priv->dev,
sizeof(struct txbd8) * priv->total_tx_ring_size +
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
@@ -2177,7 +2156,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (i == nr_frags - 1)
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
- bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
+ bufaddr = skb_frag_dma_map(priv->dev,
&skb_shinfo(skb)->frags[i],
0,
length,
@@ -2229,7 +2208,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus |= BD_LFLAG(TXBD_TOE);
}
- txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
+ txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
/* If time stamping is requested one additional TxBD must be set up. The
@@ -2342,10 +2321,13 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
tempval = gfar_read(&regs->rctrl);
/* If parse is no longer required, then disable parser */
- if (tempval & RCTRL_REQ_PARSER)
+ if (tempval & RCTRL_REQ_PARSER) {
tempval |= RCTRL_PRSDEP_INIT;
- else
+ priv->uses_rxfcb = 1;
+ } else {
tempval &= ~RCTRL_PRSDEP_INIT;
+ priv->uses_rxfcb = 0;
+ }
gfar_write(&regs->rctrl, tempval);
}
@@ -2378,6 +2360,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
tempval = gfar_read(&regs->rctrl);
tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
gfar_write(&regs->rctrl, tempval);
+ priv->uses_rxfcb = 1;
} else {
/* Disable VLAN tag extraction */
tempval = gfar_read(&regs->rctrl);
@@ -2401,15 +2384,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
int oldsize = priv->rx_buffer_size;
int frame_size = new_mtu + ETH_HLEN;
- if (gfar_is_vlan_on(priv))
- frame_size += VLAN_HLEN;
-
if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
netif_err(priv, drv, dev, "Invalid MTU setting\n");
return -EINVAL;
}
- if (gfar_uses_fcb(priv))
+ if (priv->uses_rxfcb)
frame_size += GMAC_FCB_LEN;
frame_size += priv->padding;
@@ -2542,7 +2522,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
} else
buflen = bdp->length;
- dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_single(priv->dev, bdp->bufPtr,
buflen, DMA_TO_DEVICE);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2561,7 +2541,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
for (i = 0; i < frags; i++) {
- dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_page(priv->dev, bdp->bufPtr,
bdp->length, DMA_TO_DEVICE);
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2627,7 +2607,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
struct gfar_private *priv = netdev_priv(dev);
dma_addr_t buf;
- buf = dma_map_single(&priv->ofdev->dev, skb->data,
+ buf = dma_map_single(priv->dev, skb->data,
priv->rx_buffer_size, DMA_FROM_DEVICE);
gfar_init_rxbdp(rx_queue, bdp, buf);
}
@@ -2661,7 +2641,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
if (status & RXBD_TRUNCATED) {
stats->rx_length_errors++;
- estats->rx_trunc++;
+ atomic64_inc(&estats->rx_trunc);
return;
}
@@ -2670,20 +2650,20 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
stats->rx_length_errors++;
if (status & RXBD_LARGE)
- estats->rx_large++;
+ atomic64_inc(&estats->rx_large);
else
- estats->rx_short++;
+ atomic64_inc(&estats->rx_short);
}
if (status & RXBD_NONOCTET) {
stats->rx_frame_errors++;
- estats->rx_nonoctet++;
+ atomic64_inc(&estats->rx_nonoctet);
}
if (status & RXBD_CRCERR) {
- estats->rx_crcerr++;
+ atomic64_inc(&estats->rx_crcerr);
stats->rx_crc_errors++;
}
if (status & RXBD_OVERRUN) {
- estats->rx_overrun++;
+ atomic64_inc(&estats->rx_overrun);
stats->rx_crc_errors++;
}
}
@@ -2708,8 +2688,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi)
{
struct gfar_private *priv = netdev_priv(dev);
struct rxfcb *fcb = NULL;
@@ -2756,10 +2736,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Send the packet up the stack */
ret = napi_gro_receive(napi, skb);
- if (GRO_DROP == ret)
- priv->extra_stats.kernel_dropped++;
-
- return 0;
+ if (unlikely(GRO_DROP == ret))
+ atomic64_inc(&priv->extra_stats.kernel_dropped);
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2780,7 +2758,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = rx_queue->cur_rx;
base = rx_queue->rx_bd_base;
- amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
+ amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
@@ -2792,7 +2770,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
- dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_single(priv->dev, bdp->bufPtr,
priv->rx_buffer_size, DMA_FROM_DEVICE);
if (unlikely(!(bdp->status & RXBD_ERR) &&
@@ -2825,7 +2803,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
} else {
netif_warn(priv, rx_err, dev, "Missing skb!\n");
rx_queue->stats.rx_dropped++;
- priv->extra_stats.rx_skbmissing++;
+ atomic64_inc(&priv->extra_stats.rx_skbmissing);
}
}
@@ -3258,7 +3236,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
netif_dbg(priv, tx_err, dev,
"TX FIFO underrun, packet dropped\n");
dev->stats.tx_dropped++;
- priv->extra_stats.tx_underrun++;
+ atomic64_inc(&priv->extra_stats.tx_underrun);
local_irq_save(flags);
lock_tx_qs(priv);
@@ -3273,7 +3251,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
}
if (events & IEVENT_BSY) {
dev->stats.rx_errors++;
- priv->extra_stats.rx_bsy++;
+ atomic64_inc(&priv->extra_stats.rx_bsy);
gfar_receive(irq, grp_id);
@@ -3282,19 +3260,19 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
}
if (events & IEVENT_BABR) {
dev->stats.rx_errors++;
- priv->extra_stats.rx_babr++;
+ atomic64_inc(&priv->extra_stats.rx_babr);
netif_dbg(priv, rx_err, dev, "babbling RX error\n");
}
if (events & IEVENT_EBERR) {
- priv->extra_stats.eberr++;
+ atomic64_inc(&priv->extra_stats.eberr);
netif_dbg(priv, rx_err, dev, "bus error\n");
}
if (events & IEVENT_RXC)
netif_dbg(priv, rx_status, dev, "control frame\n");
if (events & IEVENT_BABT) {
- priv->extra_stats.tx_babt++;
+ atomic64_inc(&priv->extra_stats.tx_babt);
netif_dbg(priv, tx_err, dev, "babbling TX error\n");
}
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 71793f4fca3..63a28d294e2 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -627,34 +627,29 @@ struct rmon_mib
};
struct gfar_extra_stats {
- u64 kernel_dropped;
- u64 rx_large;
- u64 rx_short;
- u64 rx_nonoctet;
- u64 rx_crcerr;
- u64 rx_overrun;
- u64 rx_bsy;
- u64 rx_babr;
- u64 rx_trunc;
- u64 eberr;
- u64 tx_babt;
- u64 tx_underrun;
- u64 rx_skbmissing;
- u64 tx_timeout;
+ atomic64_t kernel_dropped;
+ atomic64_t rx_large;
+ atomic64_t rx_short;
+ atomic64_t rx_nonoctet;
+ atomic64_t rx_crcerr;
+ atomic64_t rx_overrun;
+ atomic64_t rx_bsy;
+ atomic64_t rx_babr;
+ atomic64_t rx_trunc;
+ atomic64_t eberr;
+ atomic64_t tx_babt;
+ atomic64_t tx_underrun;
+ atomic64_t rx_skbmissing;
+ atomic64_t tx_timeout;
};
#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
-#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64))
+#define GFAR_EXTRA_STATS_LEN \
+ (sizeof(struct gfar_extra_stats)/sizeof(atomic64_t))
-/* Number of stats in the stats structure (ignore car and cam regs)*/
+/* Number of stats exported via ethtool */
#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
-struct gfar_stats {
- u64 extra[GFAR_EXTRA_STATS_LEN];
- u64 rmon[GFAR_RMON_LEN];
-};
-
-
struct gfar {
u32 tsec_id; /* 0x.000 - Controller ID register */
u32 tsec_id2; /* 0x.004 - Controller ID2 register */
@@ -1054,28 +1049,65 @@ enum gfar_errata {
* the buffer descriptor determines the actual condition.
*/
struct gfar_private {
-
- /* Indicates how many tx, rx queues are enabled */
- unsigned int num_tx_queues;
unsigned int num_rx_queues;
- unsigned int num_grps;
- unsigned int mode;
-
- /* The total tx and rx ring size for the enabled queues */
- unsigned int total_tx_ring_size;
- unsigned int total_rx_ring_size;
- struct device_node *node;
+ struct device *dev;
struct net_device *ndev;
- struct platform_device *ofdev;
enum gfar_errata errata;
+ unsigned int rx_buffer_size;
+
+ u16 uses_rxfcb;
+ u16 padding;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
- struct gfar_priv_grp gfargrp[MAXGROUPS];
struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
+ struct gfar_priv_grp gfargrp[MAXGROUPS];
+
+ u32 device_flags;
+
+ unsigned int mode;
+ unsigned int num_tx_queues;
+ unsigned int num_grps;
+
+ /* Network Statistics */
+ struct gfar_extra_stats extra_stats;
+
+ /* PHY stuff */
+ phy_interface_t interface;
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+
+ /* Bitfield update lock */
+ spinlock_t bflock;
+
+ uint32_t msg_enable;
+
+ struct work_struct reset_task;
+
+ struct platform_device *ofdev;
+ unsigned char
+ extended_hash:1,
+ bd_stash_en:1,
+ rx_filer_enable:1,
+ /* Wake-on-LAN enabled */
+ wol_en:1,
+ /* Enable priority based Tx scheduling in Hw */
+ prio_sched_en:1;
+
+ /* The total tx and rx ring size for the enabled queues */
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
/* RX per device parameters */
- unsigned int rx_buffer_size;
unsigned int rx_stash_size;
unsigned int rx_stash_index;
@@ -1094,39 +1126,6 @@ struct gfar_private {
unsigned int fifo_starve;
unsigned int fifo_starve_off;
- /* Bitfield update lock */
- spinlock_t bflock;
-
- phy_interface_t interface;
- struct device_node *phy_node;
- struct device_node *tbi_node;
- u32 device_flags;
- unsigned char
- extended_hash:1,
- bd_stash_en:1,
- rx_filer_enable:1,
- wol_en:1, /* Wake-on-LAN enabled */
- prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */
- unsigned short padding;
-
- /* PHY stuff */
- struct phy_device *phydev;
- struct mii_bus *mii_bus;
- int oldspeed;
- int oldduplex;
- int oldlink;
-
- uint32_t msg_enable;
-
- struct work_struct reset_task;
-
- /* Network Statistics */
- struct gfar_extra_stats extra_stats;
-
- /* HW time stamping enabled flag */
- int hwts_rx_en;
- int hwts_tx_en;
-
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 45e59d5c071..75e89acf491 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -149,20 +149,17 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
int i;
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u64 *extra = (u64 *) & priv->extra_stats;
+ atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ buf[i] = atomic64_read(&extra[i]);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
- struct gfar_stats *stats = (struct gfar_stats *) buf;
-
- for (i = 0; i < GFAR_RMON_LEN; i++)
- stats->rmon[i] = (u64) gfar_read(&rmon[i]);
- for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
- stats->extra[i] = extra[i];
- } else
- for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
- buf[i] = extra[i];
+ for (; i < GFAR_STATS_LEN; i++, rmon++)
+ buf[i] = (u64) gfar_read(rmon);
+ }
}
static int gfar_sset_count(struct net_device *dev, int sset)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 91977d9c1b4..328f47c92e2 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -76,16 +76,16 @@ MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
"port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
"Default = 1");
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 2b6cd02bfba..26d9cd59ec7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -81,68 +81,69 @@ struct e1000_adapter;
#include "e1000_hw.h"
-#define E1000_MAX_INTR 10
+#define E1000_MAX_INTR 10
/* TX/RX descriptor defines */
-#define E1000_DEFAULT_TXD 256
-#define E1000_MAX_TXD 256
-#define E1000_MIN_TXD 48
-#define E1000_MAX_82544_TXD 4096
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 48
+#define E1000_MAX_82544_TXD 4096
-#define E1000_DEFAULT_RXD 256
-#define E1000_MAX_RXD 256
-#define E1000_MIN_RXD 48
-#define E1000_MAX_82544_RXD 4096
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 48
+#define E1000_MAX_82544_RXD 4096
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
-#define E1000_RXBUFFER_128 128 /* Used for packet split */
-#define E1000_RXBUFFER_256 256 /* Used for packet split */
-#define E1000_RXBUFFER_512 512
-#define E1000_RXBUFFER_1024 1024
-#define E1000_RXBUFFER_2048 2048
-#define E1000_RXBUFFER_4096 4096
-#define E1000_RXBUFFER_8192 8192
-#define E1000_RXBUFFER_16384 16384
+#define E1000_RXBUFFER_128 128 /* Used for packet split */
+#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
/* SmartSpeed delimiters */
-#define E1000_SMARTSPEED_DOWNSHIFT 3
-#define E1000_SMARTSPEED_MAX 15
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
/* Packet Buffer allocations */
-#define E1000_PBA_BYTES_SHIFT 0xA
-#define E1000_TX_HEAD_ADDR_SHIFT 7
-#define E1000_PBA_TX_MASK 0xFFFF0000
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
/* Flow Control Watermarks */
-#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
-#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
-#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
+#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define AUTO_ALL_MODES 0
-#define E1000_EEPROM_82544_APM 0x0004
-#define E1000_EEPROM_APME 0x0400
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE (-1)
/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
struct e1000_buffer {
struct sk_buff *skb;
dma_addr_t dma;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 14e30515f6a..43462d596a4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev,
if (hw->media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
ecmd->advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
/* unfortunately FULL_DUPLEX != DUPLEX_FULL
- * and HALF_DUPLEX != DUPLEX_HALF */
-
+ * and HALF_DUPLEX != DUPLEX_HALF
+ */
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev,
if ((hw->media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
- ETH_TP_MDI_X :
- ETH_TP_MDI);
+ ETH_TP_MDI_X : ETH_TP_MDI);
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /*
- * MDI setting is only allowed when autoneg enabled because
+ /* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
@@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev,
ADVERTISED_Autoneg;
else
hw->autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
ecmd->advertising = hw->autoneg_advertised;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- /*
- * If the link is not reported up to netdev, interrupts are disabled,
+ /* If the link is not reported up to netdev, interrupts are disabled,
* and so the physical link state may have changed since we last
* looked. Set get_link_status to make sure that the true link
* state is interrogated, rather than pulling a cached and possibly
@@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
le16_to_cpus(&eeprom_buff[i]);
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
- eeprom->len);
+ eeprom->len);
kfree(eeprom_buff);
return ret_val;
@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
+ /* need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
+ /* need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
}
@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
- txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
+ GFP_KERNEL);
if (!txdr)
goto err_alloc_tx;
- rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
+ GFP_KERNEL);
if (!rxdr)
goto err_alloc_rx;
@@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev,
rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
- E1000_MAX_RXD : E1000_MAX_82544_RXD));
+ E1000_MAX_RXD : E1000_MAX_82544_RXD));
rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
- E1000_MAX_TXD : E1000_MAX_82544_TXD));
+ E1000_MAX_TXD : E1000_MAX_82544_TXD));
txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
goto err_setup_tx;
/* save the new, restore the old in order to free it,
- * then restore the new back again */
+ * then restore the new back again
+ */
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
if (hw->mac_type >= e1000_82543) {
-
REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
0xFFFFFFFF);
}
-
} else {
-
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
-
}
value = E1000_MC_TBL_SIZE;
@@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
- /* NOTE: we don't test MSI interrupts here, yet */
- /* Hook up test interrupt handler just for this test */
+ /* NOTE: we don't test MSI interrupts here, yet
+ * Hook up test interrupt handler just for this test
+ */
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
- netdev))
+ netdev))
shared_int = false;
else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
@@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
if (hw->media_type == e1000_media_type_copper &&
hw->phy_type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
else {
/* Set the ILOS bit on the fiber Nic is half
- * duplex link is detected. */
+ * duplex link is detected.
+ */
stat_reg = er32(STATUS);
if ((stat_reg & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
- 1024);
+ 1024);
if (!ret_val)
good_cnt++;
if (unlikely(++l == rxdr->count)) l = 0;
@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
hw->serdes_has_link = false;
/* On some blade server designs, link establishment
- * could take as long as 2-3 minutes */
+ * could take as long as 2-3 minutes
+ */
do {
e1000_check_for_link(hw);
if (hw->serdes_has_link)
@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev,
e_info(hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
+ * interfere with test result
+ */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
+ * so exclude FUNC_1 ports from having WoL enabled
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
!adapter->eeprom_wol) {
wol->supported = 0;
@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
+ * supported by this hardware
+ */
if (e1000_wol_exclusion(adapter, wol) ||
!device_can_wakeup(&adapter->pdev->dev))
return;
@@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
-/* BUG_ON(i != E1000_STATS_LEN); */
+/* BUG_ON(i != E1000_STATS_LEN); */
}
static void e1000_get_strings(struct net_device *netdev, u32 stringset,
@@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
-/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+ /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
- .get_drvinfo = e1000_get_drvinfo,
- .get_regs_len = e1000_get_regs_len,
- .get_regs = e1000_get_regs,
- .get_wol = e1000_get_wol,
- .set_wol = e1000_set_wol,
- .get_msglevel = e1000_get_msglevel,
- .set_msglevel = e1000_set_msglevel,
- .nway_reset = e1000_nway_reset,
- .get_link = e1000_get_link,
- .get_eeprom_len = e1000_get_eeprom_len,
- .get_eeprom = e1000_get_eeprom,
- .set_eeprom = e1000_set_eeprom,
- .get_ringparam = e1000_get_ringparam,
- .set_ringparam = e1000_set_ringparam,
- .get_pauseparam = e1000_get_pauseparam,
- .set_pauseparam = e1000_set_pauseparam,
- .self_test = e1000_diag_test,
- .get_strings = e1000_get_strings,
- .set_phys_id = e1000_set_phys_id,
- .get_ethtool_stats = e1000_get_ethtool_stats,
- .get_sset_count = e1000_get_sset_count,
- .get_coalesce = e1000_get_coalesce,
- .set_coalesce = e1000_set_coalesce,
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = e1000_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .set_phys_id = e1000_set_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000_get_sset_count,
+ .get_coalesce = e1000_get_coalesce,
+ .set_coalesce = e1000_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
};
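For reference, the table above only takes effect once it is published on the net_device; a hedged sketch of that hook-up (the function name is illustrative and the driver's actual registration point is not part of this hunk):

/* Route ethtool ioctls for this interface to the handlers above. */
static void example_register_ethtool(struct net_device *netdev)
{
	netdev->ethtool_ops = &e1000_ethtool_ops;
}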
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8fedd245153..2879b9631e1 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
if (hw->phy_init_script) {
msleep(20);
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of this routine. */
+ /* Save off the current value of register 0x2F5B to be restored
+ * at the end of this routine.
+ */
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disabled the PHY transmitter */
@@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
case e1000_82541:
case e1000_82541_rev_2:
/* These controllers can't ack the 64-bit write when issuing the
- * reset, so use IO-mapping as a workaround to issue the reset */
+ * reset, so use IO-mapping as a workaround to issue the reset
+ */
E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
break;
case e1000_82545_rev_3:
@@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
break;
}
- /* After MAC reset, force reload of EEPROM to restore power-on settings to
- * device. Later controllers reload the EEPROM automatically, so just wait
- * for reload to complete.
+ /* After MAC reset, force reload of EEPROM to restore power-on settings
+ * to device. Later controllers reload the EEPROM automatically, so
+ * just wait for reload to complete.
*/
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
msleep(5);
}
- /* Setup the receive address. This involves initializing all of the Receive
- * Address Registers (RARs 0 - 15).
+ /* Setup the receive address. This involves initializing all of the
+ * Receive Address Registers (RARs 0 - 15).
*/
e1000_init_rx_addrs(hw);
@@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* use write flush to prevent Memory Write Block (MWB) from
- * occurring when accessing our register space */
+ * occurring when accessing our register space
+ */
E1000_WRITE_FLUSH();
}
@@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw)
case e1000_82546_rev_3:
break;
default:
- /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ /* Workaround for PCI-X problem when BIOS sets MMRBC
+ * incorrectly.
+ */
if (hw->bus_type == e1000_bus_type_pcix
&& e1000_pcix_get_mmrbc(hw) > 2048)
e1000_pcix_set_mmrbc(hw, 2048);
@@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
ctrl_ext = er32(CTRL_EXT);
/* Relaxed ordering must be disabled to avoid a parity
- * error crash in a PCI slot. */
+ * error crash in a PCI slot.
+ */
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
}
@@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ew32(FCRTL, 0);
ew32(FCRTH, 0);
} else {
- /* We need to set up the Receive Threshold high and low water marks
- * as well as (optionally) enabling the transmission of XON frames.
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
*/
if (hw->fc_send_xon) {
ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
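The fc_low_water value written above is one of two thresholds derived from the Rx FIFO size; a hedged sketch of how the E1000_FC_HIGH_DIFF/E1000_FC_LOW_DIFF deltas from e1000.h map onto the flow-control registers (example_fc_watermarks and the rx_fifo_size parameter are illustrative; the register names are the driver's own):

/* XOFF is signalled when the FIFO fills to the high watermark, XON when
 * it drains back to the low watermark; both sit a fixed number of bytes
 * below the FIFO size.
 */
static void example_fc_watermarks(struct e1000_hw *hw, u32 rx_fifo_size)
{
	u32 high = rx_fifo_size - E1000_FC_HIGH_DIFF;	/* XOFF threshold */
	u32 low  = rx_fifo_size - E1000_FC_LOW_DIFF;	/* XON threshold */

	ew32(FCRTH, high);
	ew32(FCRTL, low | E1000_FCRTL_XONE);	/* enable XON frames */
	ew32(FCTTV, E1000_FC_PAUSE_TIME);	/* pause timer value */
}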
@@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e1000_config_collision_dist(hw);
/* Check for a software override of the flow control settings, and setup
- * the device accordingly. If auto-negotiation is enabled, then software
- * will have to set the "PAUSE" bits to the correct value in the Tranmsit
- * Config Word Register (TXCW) and re-start auto-negotiation. However, if
- * auto-negotiation is disabled, then software will have to manually
- * configure the two flow control enable bits in the CTRL register.
+ * the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start
+ * auto-negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
*
* The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames, but
- * not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but we do
- * not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
*/
switch (hw->fc) {
case E1000_FC_NONE:
- /* Flow control is completely disabled by a software over-ride. */
+ /* Flow ctrl is completely disabled by a software over-ride */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
break;
case E1000_FC_RX_PAUSE:
- /* RX Flow control is enabled and TX Flow control is disabled by a
- * software over-ride. Since there really isn't a way to advertise
- * that we are capable of RX Pause ONLY, we will advertise that we
- * support both symmetric and asymmetric RX PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
+ /* Rx Flow control is enabled and Tx Flow control is disabled by
+ * a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
case E1000_FC_TX_PAUSE:
- /* TX Flow control is enabled, and RX Flow control is disabled, by a
- * software over-ride.
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
break;
case E1000_FC_FULL:
- /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
default:
@@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
break;
}
- /* Since auto-negotiation is enabled, take the link out of reset (the link
- * will be in reset, because we previously reset the chip). This will
- * restart auto-negotiation. If auto-negotiation is successful then the
- * link-up status bit will be set and the flow control enable bits (RFCE
- * and TFCE) will be set according to their negotiated value.
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
*/
e_dbg("Auto-negotiation enabled\n");
@@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
hw->txcw = txcw;
msleep(1);
- /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
- * indication in the Device Status Register. Time-out if a link isn't
- * seen in 500 milliseconds seconds (Auto-negotiation should complete in
- * less than 500 milliseconds even if the other end is doing it in SW).
- * For internal serdes, we just assume a signal is present, then poll.
+ /* If we have a signal (the cable is plugged in) then poll for a
+ * "Link-Up" indication in the Device Status Register. Time-out if a
+ * link isn't seen in 500 milliseconds (Auto-negotiation should
+ * complete in less than 500 milliseconds even if the other end is doing
+ * it in SW). For internal serdes, we just assume a signal is present,
+ * then poll.
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
@@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e_dbg("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
- * e1000_check_for_link. This routine will force the link up if
- * we detect a signal. This will allow us to communicate with
- * non-autonegotiating link partners.
+ * e1000_check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
*/
ret_val = e1000_check_for_link(hw);
if (ret_val) {
@@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
e_dbg("e1000_copper_link_preconfig");
ctrl = er32(CTRL);
- /* With 82543, we need to force speed and duplex on the MAC equal to what
- * the PHY speed and duplex configuration is. In addition, we need to
- * perform a hardware reset on the PHY to take it out of reset.
+ /* With 82543, we need to force speed and duplex on the MAC equal to
+ * what the PHY speed and duplex configuration is. In addition, we need
+ * to perform a hardware reset on the PHY to take it out of reset.
*/
if (hw->mac_type > e1000_82543) {
ctrl |= E1000_CTRL_SLU;
@@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
- * resolution as hardware default. */
+ * resolution as hardware default.
+ */
if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
/* Disable SmartSpeed */
ret_val =
@@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
if (hw->autoneg) {
/* Setup autoneg and flow control advertisement
- * and perform autonegotiation */
+ * and perform autonegotiation
+ */
ret_val = e1000_copper_link_autoneg(hw);
if (ret_val)
return ret_val;
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
- * depending on value from forced_speed_duplex. */
+ * depending on value from forced_speed_duplex.
+ */
e_dbg("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) {
@@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start
+ * auto-negotiation.
*
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
@@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* capable of RX Pause ONLY, we will advertise that we
* support both symmetric and asymmetric RX PAUSE. Later
* (in e1000_config_fc_after_link_up) we will disable the
- *hw's ability to send PAUSE frames.
+ * hw's ability to send PAUSE frames.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
@@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Are we forcing Full or Half Duplex? */
if (hw->forced_speed_duplex == e1000_100_full ||
hw->forced_speed_duplex == e1000_10_full) {
- /* We want to force full duplex so we SET the full duplex bits in the
- * Device and MII Control Registers.
+ /* We want to force full duplex so we SET the full duplex bits
+ * in the Device and MII Control Registers.
*/
ctrl |= E1000_CTRL_FD;
mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
e_dbg("Full Duplex\n");
} else {
- /* We want to force half duplex so we CLEAR the full duplex bits in
- * the Device and MII Control Registers.
+ /* We want to force half duplex so we CLEAR the full duplex bits
+ * in the Device and MII Control Registers.
*/
ctrl &= ~E1000_CTRL_FD;
mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
@@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed are duplex are forced.
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires
+ * MDI forced whenever speed and duplex are forced.
*/
phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
ret_val =
@@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0;
- /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ /* Wait for autoneg to complete or 4.5 seconds to expire */
for (i = PHY_FORCE_TIME; i > 0; i--) {
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
*/
ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
msleep(100);
}
if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
- /* We didn't get link. Reset the DSP and wait again for link. */
+ /* We didn't get link. Reset the DSP and wait again
+ * for link.
+ */
ret_val = e1000_phy_reset_dsp(hw);
if (ret_val) {
e_dbg("Error Resetting PHY DSP\n");
return ret_val;
}
}
- /* This loop will early-out if the link condition has been met. */
+ /* This loop will early-out if the link condition has been
+ * met
+ */
for (i = PHY_FORCE_TIME; i > 0; i--) {
if (mii_status_reg & MII_SR_LINK_STATUS)
break;
msleep(100);
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
*/
ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
}
if (hw->phy_type == e1000_phy_m88) {
- /* Because we reset the PHY above, we need to re-force TX_CLK in the
- * Extended PHY Specific Control Register to 25MHz clock. This value
- * defaults back to a 2.5MHz clock when the PHY is reset.
+ /* Because we reset the PHY above, we need to re-force TX_CLK in
+ * the Extended PHY Specific Control Register to 25MHz clock.
+ * This value defaults back to a 2.5MHz clock when the PHY is
+ * reset.
*/
ret_val =
e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
@@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* In addition, because of the s/w reset above, we need to enable CRS on
- * TX. This must be set for both full and half duplex operation.
+ /* In addition, because of the s/w reset above, we need to
+ * enable CRS on Tx. This must be set for both full and half
+ * duplex operation.
*/
ret_val =
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
e_dbg("e1000_config_mac_to_phy");
/* 82544 or newer MAC, Auto Speed Detection takes care of
- * MAC speed/duplex configuration.*/
+ * MAC speed/duplex configuration.
+ */
if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
return E1000_SUCCESS;
@@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
* registers depending on negotiated values.
*/
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
- &phy_data);
+ &phy_data);
if (ret_val)
return ret_val;
@@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
ctrl |= E1000_CTRL_SPD_1000;
else if ((phy_data & M88E1000_PSSR_SPEED) ==
- M88E1000_PSSR_100MBS)
+ M88E1000_PSSR_100MBS)
ctrl |= E1000_CTRL_SPD_100;
}
@@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
/* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement Register
- * (Address 4) and the Auto_Negotiation Base Page Ability
- * Register (Address 5) to determine how flow control was
- * negotiated.
+ * (Address 4) and the Auto_Negotiation Base Page
+ * Ability Register (Address 5) to determine how flow
+ * control was negotiated.
*/
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
&mii_nway_adv_reg);
@@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Two bits in the Auto Negotiation Advertisement Register
- * (Address 4) and two bits in the Auto Negotiation Base
- * Page Ability Register (Address 5) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
+ /* Two bits in the Auto Negotiation Advertisement
+ * Register (Address 4) and two bits in the Auto
+ * Negotiation Base Page Ability Register (Address 5)
+ * determine flow control for both the PHY and the link
+ * partner. The following table, taken out of the IEEE
+ * 802.3ab/D6.0 dated March 25, 1999, describes these
+ * PAUSE resolution bits and how flow control is
+ * determined based upon these settings.
* NOTE: DC = Don't Care
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 0 | 0 | DC | DC | E1000_FC_NONE
* 0 | 1 | 0 | DC | E1000_FC_NONE
* 0 | 1 | 1 | 0 | E1000_FC_NONE
@@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 1 | DC | 1 | DC | E1000_FC_FULL
*
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /* Now we need to check if the user selected RX ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
+ /* Now we need to check if the user selected Rx
+ * ONLY of pause frames. In this case, we had
+ * to advertise FULL flow control because we
+ * could not advertise Rx ONLY. Hence, we must
+ * now check to see if we need to turn OFF the
+ * TRANSMISSION of PAUSE frames.
*/
if (hw->original_fc == E1000_FC_FULL) {
hw->fc = E1000_FC_FULL;
@@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
*
*/
@@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
*
*/
@@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
- /* Per the IEEE spec, at this point flow control should be
- * disabled. However, we want to consider that we could
- * be connected to a legacy switch that doesn't advertise
- * desired flow control, but can be forced on the link
- * partner. So if we advertised no flow control, that is
- * what we will resolve to. If we advertised some kind of
- * receive capability (Rx Pause Only or Full Flow Control)
- * and the link partner advertised none, we will configure
- * ourselves to enable Rx Flow Control only. We can do
- * this safely for two reasons: If the link partner really
- * didn't want flow control enabled, and we enable Rx, no
- * harm done since we won't be receiving any PAUSE frames
- * anyway. If the intent on the link partner was to have
- * flow control enabled, then by us enabling RX only, we
- * can at least receive pause frames and process them.
- * This is a good idea because in most cases, since we are
- * predominantly a server NIC, more times than not we will
- * be asked to delay transmission of packets than asking
- * our link partner to pause transmission of frames.
+ /* Per the IEEE spec, at this point flow control should
+ * be disabled. However, we want to consider that we
+ * could be connected to a legacy switch that doesn't
+ * advertise desired flow control, but can be forced on
+ * the link partner. So if we advertised no flow
+ * control, that is what we will resolve to. If we
+ * advertised some kind of receive capability (Rx Pause
+ * Only or Full Flow Control) and the link partner
+ * advertised none, we will configure ourselves to
+ * enable Rx Flow Control only. We can do this safely
+ * for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx,
+ * no harm done since we won't be receiving any PAUSE
+ * frames anyway. If the intent on the link partner was
+ * to have flow control enabled, then by us enabling Rx
+ * only, we can at least receive pause frames and
+ * process them. This is a good idea because in most
+ * cases, since we are predominantly a server NIC, more
+ * times than not we will be asked to delay transmission
+ * of packets than asking our link partner to pause
+ * transmission of frames.
*/
else if ((hw->original_fc == E1000_FC_NONE ||
hw->original_fc == E1000_FC_TX_PAUSE) ||
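A compact, hedged sketch of the PAUSE/ASM_DIR resolution spelled out in the tables above (IEEE 802.3, Annex 28B); example_resolve_fc is illustrative, while the bit masks and E1000_FC_* values are the driver's own:

/* adv is our advertisement (PHY_AUTONEG_ADV), lpa is the link partner's
 * base page ability (PHY_LP_ABILITY); returns the resolved E1000_FC_*.
 */
static u8 example_resolve_fc(u16 adv, u16 lpa)
{
	bool loc_pause = adv & NWAY_AR_PAUSE;
	bool loc_asm   = adv & NWAY_AR_ASM_DIR;
	bool lp_pause  = lpa & NWAY_LPAR_PAUSE;
	bool lp_asm    = lpa & NWAY_LPAR_ASM_DIR;

	if (loc_pause && lp_pause)
		return E1000_FC_FULL;		/* symmetric PAUSE */
	if (!loc_pause && loc_asm && lp_pause && lp_asm)
		return E1000_FC_TX_PAUSE;	/* we may send, not honor */
	if (loc_pause && loc_asm && !lp_pause && lp_asm)
		return E1000_FC_RX_PAUSE;	/* we honor, do not send */
	return E1000_FC_NONE;
}

As the surrounding hunks note, the driver additionally downgrades FULL to RX_PAUSE when the user originally asked for Rx-only pause.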
@@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), and our link partner is not trying to
* auto-negotiate with us (we are receiving idles or data),
* we need to force link up. We also need to give auto-negotiation
@@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
goto out;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
hw->serdes_has_link = true;
} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
- /*
- * If we force link for non-auto-negotiation switch, check
+ /* If we force link for non-auto-negotiation switch, check
* link status based on MAC synchronization for internal
* serdes media type.
*/
@@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
if (phy_data & MII_SR_LINK_STATUS) {
hw->get_link_status = false;
- /* Check if there was DownShift, must be checked immediately after
- * link-up */
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
e1000_check_downshift(hw);
/* If we are on 82544 or 82543 silicon and speed/duplex
- * are forced to 10H or 10F, then we will implement the polarity
- * reversal workaround. We disable interrupts first, and upon
- * returning, place the devices interrupt state to its previous
- * value except for the link status change interrupt which will
+ * are forced to 10H or 10F, then we will implement the
+ * polarity reversal workaround. We disable interrupts
+ * first, and upon returning, place the device's
+ * interrupt state to its previous value except for the
+ * link status change interrupt which will
* happen due to the execution of this workaround.
*/
@@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
}
}
- /* Configure Flow Control now that Auto-Neg has completed. First, we
- * need to restore the desired flow control settings because we may
- * have had to re-autoneg with a different link partner.
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control settings
+ * because we may have had to re-autoneg with a different link
+ * partner.
*/
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
@@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
}
/* At this point we know that we are on copper and we have
- * auto-negotiated link. These are conditions for checking the link
- * partner capability register. We use the link speed to determine if
- * TBI compatibility needs to be turned on or off. If the link is not
- * at gigabit speed, then TBI compatibility is not needed. If we are
- * at gigabit speed, we turn on TBI compatibility.
+ * auto-negotiated link. These are conditions for checking the
+ * link partner capability register. We use the link speed to
+ * determine if TBI compatibility needs to be turned on or off.
+ * If the link is not at gigabit speed, then TBI compatibility
+ * is not needed. If we are at gigabit speed, we turn on TBI
+ * compatibility.
*/
if (hw->tbi_compatibility_en) {
u16 speed, duplex;
@@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
return ret_val;
}
if (speed != SPEED_1000) {
- /* If link speed is not set to gigabit speed, we do not need
- * to enable TBI compatibility.
+ /* If link speed is not set to gigabit speed, we
+ * do not need to enable TBI compatibility.
*/
if (hw->tbi_compatibility_on) {
- /* If we previously were in the mode, turn it off. */
+ /* If we previously were in the mode,
+ * turn it off.
+ */
rctl = er32(RCTL);
rctl &= ~E1000_RCTL_SBP;
ew32(RCTL, rctl);
hw->tbi_compatibility_on = false;
}
} else {
- /* If TBI compatibility is was previously off, turn it on. For
- * compatibility with a TBI link partner, we will store bad
- * packets. Some frames have an additional byte on the end and
+ /* If TBI compatibility was previously off,
+ * turn it on. For compatibility with a TBI link
+ * partner, we will store bad packets. Some
+ * frames have an additional byte on the end and
* will look like CRC errors to to the hardware.
*/
if (!hw->tbi_compatibility_on) {
@@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
*duplex = FULL_DUPLEX;
}
- /* IGP01 PHY may advertise full duplex operation after speed downgrade even
- * if it is operating at half duplex. Here we set the duplex settings to
- * match the duplex in the link partner's capabilities.
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade
+ * even if it is operating at half duplex. Here we set the duplex
+ * settings to match the duplex in the link partner's capabilities.
*/
if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
@@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
*/
static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Raise the clock input to the Management Data Clock (by setting the MDC
- * bit), and then delay 10 microseconds.
+ /* Raise the clock input to the Management Data Clock (by setting the
+ * MDC bit), and then delay 10 microseconds.
*/
ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
E1000_WRITE_FLUSH();
@@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
*/
static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Lower the clock input to the Management Data Clock (by clearing the MDC
- * bit), and then delay 10 microseconds.
+ /* Lower the clock input to the Management Data Clock (by clearing the
+ * MDC bit), and then delay 10 microseconds.
*/
ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
E1000_WRITE_FLUSH();
@@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
while (mask) {
- /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
- * then raising and lowering the Management Data Clock. A "0" is
- * shifted out to the PHY by setting the MDIO bit to "0" and then
- * raising and lowering the clock.
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to
+ * "1" and then raising and lowering the Management Data Clock.
+ * A "0" is shifted out to the PHY by setting the MDIO bit to
+ * "0" and then raising and lowering the clock.
*/
if (data & mask)
ctrl |= E1000_CTRL_MDIO;
@@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
u8 i;
/* In order to read a register from the PHY, we need to shift in a total
- * of 18 bits from the PHY. The first two bit (turnaround) times are used
- * to avoid contention on the MDIO pin when a read operation is performed.
- * These two bits are ignored by us and thrown away. Bits are "shifted in"
- * by raising the input to the Management Data Clock (setting the MDC bit),
- * and then reading the value of the MDIO bit.
+ * of 18 bits from the PHY. The first two bit (turnaround) times are
+ * used to avoid contention on the MDIO pin when a read operation is
+ * performed. These two bits are ignored by us and thrown away. Bits are
+ * "shifted in" by raising the input to the Management Data Clock
+ * (setting the MDC bit), and then reading the value of the MDIO bit.
*/
ctrl = er32(CTRL);
- /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+ * input.
+ */
ctrl &= ~E1000_CTRL_MDIO_DIR;
ctrl &= ~E1000_CTRL_MDIO;
ew32(CTRL, ctrl);
E1000_WRITE_FLUSH();
- /* Raise and Lower the clock before reading in the data. This accounts for
- * the turnaround bits. The first clock occurred when we clocked out the
- * last bit of the Register Address.
+ /* Raise and Lower the clock before reading in the data. This accounts
+ * for the turnaround bits. The first clock occurred when we clocked out
+ * the last bit of the Register Address.
*/
e1000_raise_mdi_clk(hw, &ctrl);
e1000_lower_mdi_clk(hw, &ctrl);
@@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
if (hw->mac_type > e1000_82543) {
/* Set up Op-code, Phy Address, and register address in the MDI
- * Control register. The MAC will take care of interfacing with the
- * PHY to retrieve the desired data.
+ * Control register. The MAC will take care of interfacing with
+ * the PHY to retrieve the desired data.
*/
if (hw->mac_type == e1000_ce4100) {
mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
@@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
*phy_data = (u16) mdic;
}
} else {
- /* We must first send a preamble through the MDIO pin to signal the
- * beginning of an MII instruction. This is done by sending 32
- * consecutive "1" bits.
+ /* We must first send a preamble through the MDIO pin to signal
+ * the beginning of an MII instruction. This is done by sending
+ * 32 consecutive "1" bits.
*/
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
/* Now combine the next few fields that are required for a read
* operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine five different times. The format of
- * a MII read instruction consists of a shift out of 14 bits and is
- * defined as follows:
+ * e1000_shift_out_mdi_bits routine five different times. The
+ * format of a MII read instruction consists of a shift out of
+ * 14 bits and is defined as follows:
* <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
- * followed by a shift in of 18 bits. This first two bits shifted in
- * are TurnAround bits used to avoid contention on the MDIO pin when a
- * READ operation is performed. These two bits are thrown away
- * followed by a shift in of 16 bits which contains the desired data.
+ * followed by a shift in of 18 bits. The first two bits
+ * shifted in are TurnAround bits used to avoid contention on
+ * the MDIO pin when a READ operation is performed. These two
+ * bits are thrown away followed by a shift in of 16 bits which
+ * contains the desired data.
*/
mdic = ((reg_addr) | (phy_addr << 5) |
(PHY_OP_READ << 10) | (PHY_SOF << 12));
e1000_shift_out_mdi_bits(hw, mdic, 14);
- /* Now that we've shifted out the read command to the MII, we need to
- * "shift in" the 16-bit value (18 total bits) of the requested PHY
- * register address.
+ /* Now that we've shifted out the read command to the MII, we
+ * need to "shift in" the 16-bit value (18 total bits) of the
+ * requested PHY register address.
*/
*phy_data = e1000_shift_in_mdi_bits(hw);
}
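A worked, hypothetical illustration of the 14-bit read frame described above; example_mii_read_cmd simply isolates the expression the driver shifts out (PHY_SOF, PHY_OP_READ and PHY_STATUS are the driver's own defines):

/* <SOF><Op Code><Phy Addr><Reg Addr>, shifted out MSB-first after the
 * 32-bit preamble of ones; e.g. example_mii_read_cmd(1, PHY_STATUS)
 * builds the command to read MII register 1 from PHY address 1.
 */
static u32 example_mii_read_cmd(u32 phy_addr, u32 reg_addr)
{
	return reg_addr | (phy_addr << 5) |
	       (PHY_OP_READ << 10) | (PHY_SOF << 12);
}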
@@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
}
}
} else {
- /* We'll need to use the SW defined pins to shift the write command
- * out to the PHY. We first send a preamble to the PHY to signal the
- * beginning of the MII instruction. This is done by sending 32
- * consecutive "1" bits.
+ /* We'll need to use the SW defined pins to shift the write
+ * command out to the PHY. We first send a preamble to the PHY
+ * to signal the beginning of the MII instruction. This is done
+ * by sending 32 consecutive "1" bits.
*/
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
- /* Now combine the remaining required fields that will indicate a
- * write operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine for each field in the command. The
- * format of a MII write instruction is as follows:
- * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ /* Now combine the remaining required fields that will indicate
+ * a write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the
+ * command. The format of a MII write instruction is as follows:
+ * <Preamble><SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>.
*/
mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
(PHY_OP_WRITE << 12) | (PHY_SOF << 14));
@@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
- /* Read the device control register and assert the E1000_CTRL_PHY_RST
- * bit. Then, take it out of reset.
+ /* Read the device control register and assert the
+ * E1000_CTRL_PHY_RST bit. Then, take it out of reset.
* For e1000 hardware, we delay for 10ms between the assert
- * and deassert.
+ * and de-assert.
*/
ctrl = er32(CTRL);
ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
@@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
E1000_WRITE_FLUSH();
} else {
- /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
- * bit to put the PHY into reset. Then, take it out of reset.
+ /* Read the Extended Device Control Register, assert the
+ * PHY_RESET_DIR bit to put the PHY into reset. Then, take it
+ * out of reset.
*/
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
@@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_igp_get_info");
/* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
+ * and it is stored in the hw->speed_downgraded parameter.
+ */
phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
/* IGP01E1000 does not need to support it. */
@@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ /* Local/Remote Receiver Information is only valid at 1000
+ * Mbps
+ */
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val)
return ret_val;
@@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_m88_get_info");
/* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
+ * and it is stored in the hw->speed_downgraded parameter.
+ */
phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
}
if (eeprom->type == e1000_eeprom_spi) {
- /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
- * 32KB (incremented by powers of 2).
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes
+ * 128B to 32KB (incremented by powers of 2).
*/
/* Set to default value for initial eeprom read. */
eeprom->word_size = 64;
@@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
eeprom_size =
(eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
/* 256B eeprom size was not supported in earlier hardware, so we
- * bump eeprom_size up one to ensure that "1" (which maps to 256B)
- * is never the result used in the shifting logic below. */
+ * bump eeprom_size up one to ensure that "1" (which maps to
+ * 256B) is never the result used in the shifting logic below.
+ */
if (eeprom_size)
eeprom_size++;
@@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
*/
static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
{
- /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
- * wait 50 microseconds.
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and
+ * then wait 50 microseconds.
*/
*eecd = *eecd & ~E1000_EECD_SK;
ew32(EECD, *eecd);
@@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
eecd |= E1000_EECD_DO;
}
do {
- /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
- * and then raising and then lowering the clock (the SK bit controls
- * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
- * by setting "DI" to "0" and then raising and then lowering the clock.
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a
+ * "1", and then raising and then lowering the clock (the SK bit
+ * controls the clock input to the EEPROM). A "0" is shifted
+ * out to the EEPROM by setting "DI" to "0" and then raising and
+ * then lowering the clock.
*/
eecd &= ~E1000_EECD_DI;
@@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
/* In order to read a register from the EEPROM, we need to shift 'count'
* bits in from the EEPROM. Bits are "shifted in" by raising the clock
- * input to the EEPROM (setting the SK bit), and then reading the value of
- * the "DO" bit. During this "shifting in" process the "DI" bit should
- * always be clear.
+ * input to the EEPROM (setting the SK bit), and then reading the value
+ * of the "DO" bit. During this "shifting in" process the "DI" bit
+ * should always be clear.
*/
eecd = er32(EECD);
@@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
@@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
return -E1000_ERR_EEPROM;
/* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
- * acquired the EEPROM at this point, so any returns should release it */
+ * acquired the EEPROM at this point, so any returns should release it
+ */
if (eeprom->type == e1000_eeprom_spi) {
u16 word_in;
u8 read_opcode = EEPROM_READ_OPCODE_SPI;
@@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
if ((eeprom->address_bits == 8) && (offset >= 128))
read_opcode |= EEPROM_A8_OPCODE_SPI;
@@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
eeprom->address_bits);
- /* Read the data. The address of the eeprom internally increments with
- * each byte (spi) being read, saving on the overhead of eeprom setup
- * and tear-down. The address counter will roll over if reading beyond
- * the size of the eeprom, thus allowing the entire memory to be read
- * starting from any offset. */
+ /* Read the data. The address of the eeprom internally
+ * increments with each byte (spi) being read, saving on the
+ * overhead of eeprom setup and tear-down. The address counter
+ * will roll over if reading beyond the size of the eeprom, thus
+ * allowing the entire memory to be read starting from any
+ * offset.
+ */
for (i = 0; i < words; i++) {
word_in = e1000_shift_in_ee_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8);
@@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset + i),
eeprom->address_bits);
- /* Read the data. For microwire, each word requires the overhead
- * of eeprom setup and tear-down. */
+ /* Read the data. For microwire, each word requires the
+ * overhead of eeprom setup and tear-down.
+ */
data[i] = e1000_shift_in_ee_bits(hw, 16);
e1000_standby_eeprom(hw);
}
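A small, hypothetical helper restating the byte handling in the loop above: the first byte clocked in from the SPI part ends up as the low byte of the stored word, so the raw 16 bits are byte-swapped before being kept:

static u16 example_spi_word_fixup(u16 word_in)
{
	return (word_in >> 8) | (word_in << 8);	/* e.g. 0x3412 -> 0x1234 */
}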
@@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
@@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
if ((eeprom->address_bits == 8) && (offset >= 128))
write_opcode |= EEPROM_A8_OPCODE_SPI;
@@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
/* Send the data */
- /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+ /* Loop to allow for up to whole page write (32 bytes) of
+ * eeprom
+ */
while (widx < words) {
u16 word_out = data[widx];
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_ee_bits(hw, word_out, 16);
widx++;
- /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
- * operation, while the smaller eeproms are capable of an 8-byte
- * PAGE WRITE operation. Break the inner loop to pass new address
+ /* Some larger eeprom sizes are capable of a 32-byte
+ * PAGE WRITE operation, while the smaller eeproms are
+ * capable of an 8-byte PAGE WRITE operation. Break the
+ * inner loop to pass new address
*/
if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
e1000_standby_eeprom(hw);
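A hedged restatement of the boundary test above (example_spi_page_boundary is illustrative): word offsets are doubled to get the byte address, and the burst must stop whenever that address lands on a page boundary so a fresh WRITE with the new address can be issued:

/* With a 32-byte page, words 0..15 fit in the first page; once
 * (offset + widx) reaches 16 the byte address is 32 and the inner
 * loop above breaks to restart the write at the next page.
 */
static bool example_spi_page_boundary(u16 next_word, u16 page_size_bytes)
{
	return ((next_word * 2) % page_size_bytes) == 0;
}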
@@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
/* Send the data */
e1000_shift_out_ee_bits(hw, data[words_written], 16);
- /* Toggle the CS line. This in effect tells the EEPROM to execute
- * the previous command.
+ /* Toggle the CS line. This in effect tells the EEPROM to
+ * execute the previous command.
*/
e1000_standby_eeprom(hw);
- /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
- * signal that the command has been completed by raising the DO signal.
- * If DO does not go high in 10 milliseconds, then error out.
+ /* Read DO repeatedly until it is high (equal to '1'). The
+ * EEPROM will signal that the command has been completed by
+ * raising the DO signal. If DO does not go high in 10
+ * milliseconds, then error out.
*/
for (i = 0; i < 200; i++) {
eecd = er32(EECD);
@@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw)
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of the
- * manageability unit */
+ * manageability unit
+ */
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
E1000_WRITE_FLUSH();
@@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
* counters overcount this packet as a CRC error and undercount
* the packet as a good packet
*/
- /* This packet should not be counted as a CRC error. */
+ /* This packet should not be counted as a CRC error. */
stats->crcerrs--;
- /* This packet does count as a Good Packet Received. */
+ /* This packet does count as a Good Packet Received. */
stats->gprc++;
- /* Adjust the Good Octets received counters */
+ /* Adjust the Good Octets received counters */
carry_bit = 0x80000000 & stats->gorcl;
stats->gorcl += frame_len;
/* If the high bit of Gorcl (the low 32 bits of the Good Octets
@@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
if (ret_val)
return ret_val;
- /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
- * find the polarity status */
+ /* If speed is 1000 Mbps, must read the
+ * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status
+ */
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
e1000_rev_polarity_reversed :
e1000_rev_polarity_normal;
} else {
- /* For 10 Mbps, read the polarity bit in the status register. (for
- * 100 Mbps this bit is always 0) */
+ /* For 10 Mbps, read the polarity bit in the status
+ * register. (for 100 Mbps this bit is always 0)
+ */
*polarity =
(phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
e1000_rev_polarity_reversed :
@@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
} else {
if (hw->dsp_config_state == e1000_dsp_config_activated) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
+ /* Save off the current value of register 0x2F5B to be
+ * restored at the end of the routines.
+ */
ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
@@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
+ IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val)
return ret_val;
for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
return ret_val;
@@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
if (hw->ffe_config_state == e1000_ffe_config_active) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
+ /* Save off the current value of register 0x2F5B to be
+ * restored at the end of the routines.
+ */
ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
@@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
+ IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val)
return ret_val;
ret_val =
@@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
return ret_val;
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
return ret_val;
@@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return E1000_SUCCESS;
/* During driver activity LPLU should not be used or it will attain link
- * from the lowest speeds starting from 10Mbps. The capability is used for
- * Dx transitions and states */
+ * from the lowest speeds starting from 10Mbps. The capability is used
+ * for Dx transitions and states
+ */
if (hw->mac_type == e1000_82541_rev_2
|| hw->mac_type == e1000_82547_rev_2) {
ret_val =
@@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return ret_val;
}
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
- * Dx states where the power conservation is most important. During
- * driver activity we should enable SmartSpeed, so performance is
- * maintained. */
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
if (hw->smart_speed == e1000_smart_speed_on) {
ret_val =
e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index b20fff13437..8502c625dbe 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
* e1000_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
**/
-
static int __init e1000_init_module(void)
{
int ret;
@@ -266,7 +265,6 @@ module_init(e1000_init_module);
* e1000_exit_module is called just before the driver is removed
* from memory.
**/
-
static void __exit e1000_exit_module(void)
{
pci_unregister_driver(&e1000_driver);
@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
* e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
-
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
* e1000_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_configure_rx(adapter);
/* call E1000_DESC_UNUSED which always leaves
* at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean */
+ * next_to_use != next_to_clean
+ */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct e1000_rx_ring *ring = &adapter->rx_ring[i];
adapter->alloc_rx_buf(adapter, ring,
- E1000_DESC_UNUSED(ring));
+ E1000_DESC_UNUSED(ring));
}
}
@@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter)
* The phy may be powered down to save power and turn off link when the
* driver is unloaded and wake on lan is not enabled (among others)
* *** this routine MUST be followed by a call to e1000_reset ***
- *
**/
-
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
/* Just clear the power down bit to wake the phy back up */
if (hw->media_type == e1000_media_type_copper) {
/* according to the manual, the phy will retain its
- * settings across a power-down/up cycle */
+ * settings across a power-down/up cycle
+ */
e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
@@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
* The PHY cannot be powered down if any of the following is true *
* (a) WoL is enabled
* (b) AMT is active
- * (c) SoL/IDER session is active */
+ * (c) SoL/IDER session is active
+ */
if (!adapter->wol && hw->mac_type >= e1000_82540 &&
hw->media_type == e1000_media_type_copper) {
u16 mii_reg = 0;
@@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
- /*
- * Setting DOWN must be after irq_disable to prevent
+ /* Setting DOWN must be after irq_disable to prevent
* a screaming interrupt. Setting DOWN also prevents
* tasks from rescheduling.
*/
@@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter)
* rounded up to the next 1KB and expressed in KB. Likewise,
* the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
- * expressed in KB. */
+ * expressed in KB.
+ */
pba = er32(PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /*
- * the tx fifo also stores 16 bytes of information about the tx
+ /* the Tx FIFO also stores 16 bytes of information about the Tx packet,
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (hw->max_frame_size +
@@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter)
/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation */
+ * allocation, take space away from current Rx allocation
+ */
if (tx_space < min_tx_space &&
((min_tx_space - tx_space) < pba)) {
pba = pba - (min_tx_space - tx_space);
@@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter)
break;
}
- /* if short on rx space, rx wins and must trump tx
- * adjustment or use Early Receive if available */
+ /* if short on Rx space, Rx wins and must trump Tx
+ * adjustment or use Early Receive if available
+ */
if (pba < min_rx_space)
pba = min_rx_space;
}
@@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
- /*
- * flow control settings:
+ /* flow control settings:
* The high water mark must be low enough to fit one full frame
* (or the size used for early receive) above it in the Rx FIFO.
* Set it to the lower of:
@@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* clear phy power management bit if we are in gig only mode,
* which if enabled will attempt negotiation to 100Mb, which
- * can cause a loss of link at power off or driver unload */
+ * can cause a loss of link at power off or driver unload
+ */
ctrl &= ~E1000_CTRL_SWDPIN3;
ew32(CTRL, ctrl);
}
@@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
static netdev_features_t e1000_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable make sure Tx flag is always in same state as Rx.
*/
if (features & NETIF_F_HW_VLAN_RX)
features |= NETIF_F_HW_VLAN_TX;
@@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /*
- * there is a workaround being applied below that limits
+ /* there is a workaround being applied below that limits
* 64-bit DMA addresses to 64-bit hardware. There are some
* 32-bit adapters that Tx hang when given 64-bit DMA addresses
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /*
- * according to DMA-API-HOWTO, coherent calls will always
+ /* according to DMA-API-HOWTO, coherent calls will always
* succeed if the set call did
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
@@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* before reading the EEPROM, reset the controller to
- * put the device in a known good starting state */
+ * put the device in a known good starting state
+ */
e1000_reset_hw(hw);
@@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (e1000_validate_eeprom_checksum(hw) < 0) {
e_err(probe, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
- /*
- * set MAC address to all zeroes to invalidate and temporary
+ /* set MAC address to all zeroes to invalidate and temporarily
* disable this device for the user. This blocks regular
* traffic while still permitting ethtool ioctls from reaching
* the hardware as well as allowing the user to run the
@@ -1169,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* now that we have the eeprom settings, apply the special cases
* where the eeprom may be wrong or the board simply won't support
- * wake on lan on a particular port */
+ * wake on lan on a particular port
+ */
switch (pdev->device) {
case E1000_DEV_ID_82546GB_PCIE:
adapter->eeprom_wol = 0;
@@ -1177,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
/* Wake events only supported on port A for dual fiber
- * regardless of eeprom setting */
+ * regardless of eeprom setting
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
@@ -1270,7 +1267,6 @@ err_pci_reg:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-
static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -1306,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev)
* e1000_sw_init initializes the Adapter private data structure.
* e1000_init_hw_struct MUST be called before this function
**/
-
static int e1000_sw_init(struct e1000_adapter *adapter)
{
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -1337,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
* We allocate one ring per queue at run-time since we don't know the
* number of queues at compile-time.
**/
-
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
@@ -1367,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
-
static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1401,7 +1394,8 @@ static int e1000_open(struct net_device *netdev)
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
- * clean_rx handler before we do so. */
+ * clean_rx handler before we do so.
+ */
e1000_configure(adapter);
err = e1000_request_irq(adapter);
@@ -1444,7 +1438,6 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1459,10 +1452,11 @@ static int e1000_close(struct net_device *netdev)
e1000_free_all_rx_resources(adapter);
/* kill manageability vlan ID if supported, but not if a vlan with
- * the same ID is registered on the host OS (let 8021q kill it) */
+ * the same ID is registered on the host OS (let 8021q kill it)
+ */
if ((hw->mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
- !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
@@ -1483,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
unsigned long end = begin + len;
/* First rev 82545 and 82546 need to not allow any memory
- * write location to cross 64k boundary due to errata 23 */
+ * write location to cross 64k boundary due to errata 23
+ */
if (hw->mac_type == e1000_82545 ||
hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
@@ -1500,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
*
* Return 0 on success, negative on failure
**/
-
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *txdr)
{
@@ -1509,11 +1503,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
- if (!txdr->buffer_info) {
- e_err(probe, "Unable to allocate memory for the Tx descriptor "
- "ring\n");
+ if (!txdr->buffer_info)
return -ENOMEM;
- }
/* round up to nearest 4K */
@@ -1577,7 +1568,6 @@ setup_tx_desc_die:
*
* Return 0 on success, negative on failure
**/
-
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1602,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
*
* Configure the Tx unit of the MAC after a reset.
**/
-
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
u64 tdba;
@@ -1623,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
ew32(TDT, 0);
ew32(TDH, 0);
- adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
- adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDT : E1000_82542_TDT);
break;
}
@@ -1679,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
adapter->txd_cmd |= E1000_TXD_CMD_RS;
/* Cache if we're 82544 running in PCI-X because we'll
- * need this to apply a workaround later in the send path. */
+ * need this to apply a workaround later in the send path.
+ */
if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
adapter->pcix_82544 = true;
@@ -1695,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rxdr)
{
@@ -1704,11 +1695,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
- if (!rxdr->buffer_info) {
- e_err(probe, "Unable to allocate memory for the Rx descriptor "
- "ring\n");
+ if (!rxdr->buffer_info)
return -ENOMEM;
- }
desc_len = sizeof(struct e1000_rx_desc);
@@ -1777,7 +1765,6 @@ setup_rx_desc_die:
*
* Return 0 on success, negative on failure
**/
-
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1846,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
- * in e1000e_set_rx_mode */
+ * in e1000e_set_rx_mode
+ */
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -1868,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
*
* Configure the Rx unit of the MAC after a reset.
**/
-
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
u64 rdba;
@@ -1901,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/* Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring */
+ * the Base and Length of the Rx Descriptor Ring
+ */
switch (adapter->num_rx_queues) {
case 1:
default:
@@ -1911,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
ew32(RDT, 0);
ew32(RDH, 0);
- adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
- adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDT : E1000_82542_RDT);
break;
}
@@ -1938,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
*
* Free all transmit software resources
**/
-
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@@ -1961,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
*
* Free all transmit software resources
**/
-
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -1996,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
* @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@@ -2032,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_tx_rings - Free Tx Buffers for all queues
* @adapter: board private structure
**/
-
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2048,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
*
* Free all receive software resources
**/
-
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@@ -2071,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
*
* Free all receive software resources
**/
-
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -2085,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
* @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@@ -2144,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_rx_rings - Free Rx Buffers for all queues
* @adapter: board private structure
**/
-
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2204,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_set_mac(struct net_device *netdev, void *p)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2239,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
* responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
-
static void e1000_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2252,10 +2232,8 @@ static void e1000_set_rx_mode(struct net_device *netdev)
int mta_reg_count = E1000_NUM_MTA_REGISTERS;
u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
- if (!mcarray) {
- e_err(probe, "memory allocation failed\n");
+ if (!mcarray)
return;
- }
/* Check for Promiscuous and All Multicast modes */
@@ -2325,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev)
}
/* write the hash table completely, write from bottom to avoid
- * both stupid write combining chipsets, and flushing each write */
+ * both stupid write combining chipsets, and flushing each write
+ */
for (i = mta_reg_count - 1; i >= 0 ; i--) {
- /*
- * If we are on an 82544 has an errata where writing odd
+ /* If we are on an 82544 there is an errata where writing odd
* offsets overwrites the previous even offset, but writing
* backwards over the range solves the issue by always
* writing the odd offset first
@@ -2466,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work)
bool txb2b = true;
/* update snapshot of PHY registers on LSC */
e1000_get_speed_and_duplex(hw,
- &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_speed,
+ &adapter->link_duplex);
ctrl = er32(CTRL);
pr_info("%s NIC Link is Up %d Mbps %s, "
@@ -2541,7 +2519,8 @@ link_up:
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
+ * (Do the reset outside of interrupt context).
+ */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
/* exit immediately since reset is imminent */
@@ -2551,8 +2530,7 @@ link_up:
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
- /*
- * Symmetric Tx/Rx gets a reduced ITR=2000;
+ /* Symmetric Tx/Rx gets a reduced ITR=2000;
* Total asymmetrical Tx or Rx gets ITR=8000;
* everyone else is between 2000-8000.
*/
@@ -2667,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now;
}
- adapter->tx_itr = e1000_update_itr(adapter,
- adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
+ adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency;
- adapter->rx_itr = e1000_update_itr(adapter,
- adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
adapter->rx_itr = low_latency;
@@ -2704,10 +2680,11 @@ set_itr_now:
if (new_itr != adapter->itr) {
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
- * increasing */
+ * increasing
+ */
new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
adapter->itr = new_itr;
ew32(ITR, 1000000000 / (new_itr * 256));
}
@@ -2869,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* Workaround for Controller erratum --
* descriptor for non-tso packet in a linear SKB that follows a
* tso gets written back prematurely before the data is fully
- * DMA'd to the controller */
+ * DMA'd to the controller
+ */
if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) {
tx_ring->last_tx_tso = false;
@@ -2877,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
+ * in TSO mode. Append 4-byte sentinel desc
+ */
if (unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4;
/* work-around for errata 10 and it applies
@@ -2890,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size = 2015;
/* Workaround for potential 82544 hang in PCI-X. Avoid
- * terminating buffers within evenly-aligned dwords. */
+ * terminating buffers within evenly-aligned dwords.
+ */
if (unlikely(adapter->pcix_82544 &&
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
size > 4))
@@ -2902,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, DMA_TO_DEVICE);
+ size, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2933,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && f == (nr_frags-1) &&
+ size == len && size > 8))
size -= 4;
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
- * dwords. */
+ * dwords.
+ */
bufend = (unsigned long)
page_to_phys(skb_frag_page(frag));
bufend += offset + size - 1;
@@ -3002,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
- E1000_TXD_CMD_TSE;
+ E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
@@ -3043,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
tx_ring->next_to_use = i;
writel(i, hw->hw_addr + tx_ring->tdt);
/* we need this if more than one processor can write to our tail
- * at a time, it syncronizes IO on IA64/Altix systems */
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
mmiowb();
}
@@ -3098,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
netif_stop_queue(netdev);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
+ * but since that doesn't exist yet, just open code it.
+ */
smp_mb();
/* We need to check again in a case another CPU has just
- * made room available. */
+ * made room available.
+ */
if (likely(E1000_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@@ -3113,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
}
static int e1000_maybe_stop_tx(struct net_device *netdev,
- struct e1000_tx_ring *tx_ring, int size)
+ struct e1000_tx_ring *tx_ring, int size)
{
if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -3137,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int tso;
unsigned int f;
- /* This goes back to the question of how to logically map a tx queue
+ /* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively
- * if using multiple tx queues. If the stack breaks away from a
- * single qdisc implementation, we can look at this again. */
+ * if using multiple Tx queues. If the stack breaks away from a
+ * single qdisc implementation, we can look at this again.
+ */
tx_ring = adapter->tx_ring;
if (unlikely(skb->len <= 0)) {
@@ -3165,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
* overrun the FIFO, adjust the max buffer len if mss
- * drops. */
+ * drops.
+ */
if (mss) {
u8 hdr_len;
max_per_txd = min(mss << 2, max_per_txd);
@@ -3181,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* this hardware's requirements
* NOTE: this is a TSO only workaround
* if end byte alignment not correct move us
- * into the next dword */
- if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+ * into the next dword
+ */
+ if ((unsigned long)(skb_tail_pointer(skb) - 1)
+ & 4)
break;
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
@@ -3230,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count += nr_frags;
/* need: count + 2 desc gap to keep tail from touching
- * head, otherwise try next time */
+ * head, otherwise try next time
+ */
if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY;
@@ -3269,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_flags |= E1000_TX_FLAGS_NO_FCS;
count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
- nr_frags, mss);
+ nr_frags, mss);
if (count) {
netdev_sent_queue(netdev, skb->len);
@@ -3371,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
/* Print Registers */
e1000_regdump(adapter);
- /*
- * transmit dump
- */
+ /* transmit dump */
pr_info("TX Desc ring0 dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
@@ -3434,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
}
rx_ring_summary:
- /*
- * receive dump
- */
+ /* receive dump */
pr_info("\nRX Desc ring dump\n");
/* Legacy Receive Descriptor Format
@@ -3501,7 +3489,6 @@ exit:
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-
static void e1000_tx_timeout(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3529,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the watchdog.
**/
-
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
/* only return the current stats */
@@ -3543,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3580,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab
- * however with the new *_jumbo_rx* routines, jumbo receives will use
- * fragmented skbs */
+ * however with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs
+ */
if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
@@ -3616,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* e1000_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-
void e1000_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3627,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter)
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
- /*
- * Prevent stats update while adapter is being reset, or if the pci
+ /* Prevent stats update while adapter is being reset, or if the pci
* connection is down.
*/
if (adapter->link_speed == 0)
@@ -3718,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter)
/* Rx Errors */
/* RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC */
+ * our own version based on RUC and ROC
+ */
netdev->stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
@@ -3772,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-
static irqreturn_t e1000_intr(int irq, void *data)
{
struct net_device *netdev = data;
@@ -3783,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
if (unlikely((!icr)))
return IRQ_NONE; /* Not our interrupt */
- /*
- * we might have caused the interrupt, but the above
+ /* we might have caused the interrupt, but the above
* read cleared it, and just in case the driver is
* down there is nothing to do so return handled
*/
@@ -3810,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
__napi_schedule(&adapter->napi);
} else {
/* this really should not happen! if it does it is basically a
- * bug, but not a hard error, so enable ints and continue */
+ * bug, but not a hard error, so enable ints and continue
+ */
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
@@ -3824,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
**/
static int e1000_clean(struct napi_struct *napi, int budget)
{
- struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+ napi);
int tx_clean_complete = 0, work_done = 0;
tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
@@ -3915,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
+ * check with the clearing of time_stamp and movement of i
+ */
adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
- (adapter->tx_timeout_factor * HZ)) &&
+ (adapter->tx_timeout_factor * HZ)) &&
!(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
@@ -3962,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @csum: receive descriptor csum field
* @sk_buff: socket buffer with received data
**/
-
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
u32 csum, struct sk_buff *skb)
{
@@ -3998,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
* e1000_consume_page - helper function
**/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
+ u16 length)
{
bi->page = NULL;
skb->len += length;
@@ -4094,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock,
- irq_flags);
+ irq_flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
length, mapped);
spin_unlock_irqrestore(&adapter->stats_lock,
- irq_flags);
+ irq_flags);
length--;
} else {
if (netdev->features & NETIF_F_RXALL)
@@ -4106,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* recycle both page and skb */
buffer_info->skb = skb;
/* an error means any chain goes out the window
- * too */
+ * too
+ */
if (rx_ring->rx_skb_top)
dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
@@ -4122,7 +4108,7 @@ process_skb:
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
+ 0, length);
} else {
/* this is the middle of a chain */
skb_fill_page_desc(rxtop,
@@ -4140,38 +4126,42 @@ process_skb:
skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the
- * page */
+ * page
+ */
buffer_info->skb = skb;
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
/* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
+ * copybreak to save the put_page/alloc_page
+ */
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page);
- memcpy(skb_tail_pointer(skb), vaddr, length);
+ memcpy(skb_tail_pointer(skb), vaddr,
+ length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
- * buffer_info->page */
+ * buffer_info->page
+ */
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
+ buffer_info->page, 0,
+ length);
e1000_consume_page(buffer_info, skb,
- length);
+ length);
}
}
}
/* Receive Checksum Offload XXX recompute due to CRC strip? */
e1000_rx_checksum(adapter,
- (u32)(status) |
- ((u32)(rx_desc->errors) << 24),
- le16_to_cpu(rx_desc->csum), skb);
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
total_rx_bytes += (skb->len - 4); /* don't count FCS */
if (likely(!(netdev->features & NETIF_F_RXFCS)))
@@ -4213,8 +4203,7 @@ next_desc:
return cleaned;
}
-/*
- * this should improve performance for small packets with large amounts
+/* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
static void e1000_check_copybreak(struct net_device *netdev,
@@ -4318,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, skb->data);
+ length, skb->data);
spin_unlock_irqrestore(&adapter->stats_lock,
- flags);
+ flags);
length--;
} else {
if (netdev->features & NETIF_F_RXALL)
@@ -4385,10 +4374,9 @@ next_desc:
* @rx_ring: pointer to receive ring structure
* @cleaned_count: number of buffers to allocate this pass
**/
-
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring, int cleaned_count)
+ struct e1000_rx_ring *rx_ring, int cleaned_count)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -4429,7 +4417,7 @@ check_page:
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
+ buffer_info->page, 0,
buffer_info->length,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
@@ -4459,7 +4447,8 @@ check_page:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, adapter->hw.hw_addr + rx_ring->rdt);
}
@@ -4469,7 +4458,6 @@ check_page:
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
**/
-
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count)
@@ -4540,8 +4528,7 @@ map_skb:
break; /* while !buffer_info->skb */
}
- /*
- * XXX if it was allocated cleanly it will never map to a
+ /* XXX if it was allocated cleanly it will never map to a
* boundary crossing
*/
@@ -4579,7 +4566,8 @@ map_skb:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, hw->hw_addr + rx_ring->rdt);
}
@@ -4589,7 +4577,6 @@ map_skb:
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
* @adapter:
**/
-
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -4602,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice,
- * we assume back-to-back */
+ * we assume back-to-back
+ */
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
@@ -4615,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
adapter->smartspeed++;
if (!e1000_phy_setup_autoneg(hw) &&
!e1000_read_phy_reg(hw, PHY_CTRL,
- &phy_ctrl)) {
+ &phy_ctrl)) {
phy_ctrl |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
e1000_write_phy_reg(hw, PHY_CTRL,
@@ -4646,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
* @ifreq:
* @cmd:
**/
-
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -4665,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* @ifreq:
* @cmd:
**/
-
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
@@ -4927,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
hw->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work */
+ * for the switch() below to work
+ */
if ((spd & 1) || (dplx & ~1))
goto err_inval;
@@ -5130,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 750fc0194f3..c9cde352b1c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
-
void e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
.def = E1000_DEFAULT_RXD,
.arg = { .r = {
.min = E1000_MIN_RXD,
- .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+ .max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD
}}
};
@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->rx_int_delay = opt.def;
}
@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter)
break;
case 4:
e_dev_info("%s set to simplified "
- "(2000-8000) ints mode\n", opt.name);
+ "(2000-8000) ints mode\n", opt.name);
adapter->itr_setting = adapter->itr;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
- adapter);
+ adapter);
/* save the setting, because the dynamic bits
* change itr.
* clear the lower two bits because they are
- * used as control */
+ * used as control
+ */
adapter->itr_setting = adapter->itr & ~3;
break;
}
@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on fiber adapters
**/
-
static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on copper adapters
**/
-
static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_100_HALF;
+ ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
e_dev_info("Full Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
- ADVERTISE_100_FULL |
- ADVERTISE_1000_FULL;
+ ADVERTISE_100_FULL |
+ ADVERTISE_1000_FULL;
break;
case SPEED_10:
e_dev_info("10 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_10_FULL;
+ ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
e_dev_info("Forcing to 10 Mbps Half Duplex\n");
@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
- ADVERTISE_100_FULL;
+ ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
e_dev_info("Forcing to 100 Mbps Half Duplex\n");
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index a00457abce6..e0991388664 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -32,69 +32,6 @@
#include "e1000.h"
-#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
-#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
-#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
-#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
-
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
-#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
-
-#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
-#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
-#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
-
-#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
-#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
-
-#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
-#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
-
-#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
-#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
-
-/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
-#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */
-#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
-#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
-#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
-#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
-
-/* PHY Specific Control Register 2 (Page 0, Register 26) */
-#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000
- /* 1=Reverse Auto-Negotiation */
-
-/* MAC Specific Control Register (Page 2, Register 21) */
-/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
-#define GG82563_MSCR_TX_CLK_MASK 0x0007
-#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
-#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
-#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
-
-#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
-
-/* DSP Distance Register (Page 5, Register 26) */
-#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M
- 1 = 50-80M
- 2 = 80-110M
- 3 = 110-140M
- 4 = >140M
- */
-
-/* Kumeran Mode Control Register (Page 193, Register 16) */
-#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
-
-/* Max number of times Kumeran read/write should be validated */
-#define GG82563_MAX_KMRN_RETRY 0x5
-
-/* Power Management Control Register (Page 193, Register 20) */
-#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
- /* 1=Enable SERDES Electrical Idle */
-
-/* In-Band Control Register (Page 194, Register 18) */
-#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
-
/* A table for the GG82563 cable length where the range is defined
* with a lower bound at "index" and the upper bound at
* "index + 5".
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
new file mode 100644
index 00000000000..90d363b2d28
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -0,0 +1,95 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_80003ES2LAN_H_
+#define _E1000E_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26)
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+/* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index cf86090edd3..2faffbde179 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -44,21 +44,6 @@
#include "e1000.h"
-#define ID_LED_RESERVED_F746 0xF746
-#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_OFF1_ON2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_DEF1_DEF2))
-
-#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
-#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
-#define E1000_BASE1000T_STATUS 10
-#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
-#define E1000_RECEIVE_ERROR_COUNTER 21
-#define E1000_RECEIVE_ERROR_MAX 0xFFFF
-
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
@@ -1549,7 +1534,6 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
-
/* Receiver is synchronized with no invalid bits. */
switch (mac->serdes_link_state) {
case e1000_serdes_link_autoneg_complete:
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
new file mode 100644
index 00000000000..85cb1a3b7cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -0,0 +1,58 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_82571_H_
+#define _E1000E_82571_H_
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
+
+/* Intr Throttling - RW */
+#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n)))
+
+#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAC_MASK_82574 0x01F00000
+
+/* Manageability Operation Mode mask */
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
+
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
+bool e1000_check_phy_82574(struct e1000_hw *hw);
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 615b900c015..fc3a4fe1ac7 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -606,8 +606,6 @@
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
@@ -751,38 +749,6 @@
/* BME1000 PHY Specific Control Register */
#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
-/* PHY Low Power Idle Control */
-#define I82579_LPI_CTRL PHY_REG(772, 20)
-#define I82579_LPI_CTRL_100_ENABLE 0x2000
-#define I82579_LPI_CTRL_1000_ENABLE 0x4000
-#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
-#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
-
-/* Extended Management Interface (EMI) Registers */
-#define I82579_EMI_ADDR 0x10
-#define I82579_EMI_DATA 0x11
-#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
-#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
-#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
-#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
-#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */
-#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
-#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
-#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
-#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
-#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE supported */
-#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
-#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
-#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
-#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
-
-#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
-#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
-
-#define PHY_PAGE_SHIFT 5
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
- ((reg) & MAX_PHY_REG_ADDRESS))
-
/* Bits...
* 15-5: page
* 4-0: register offset
@@ -829,8 +795,4 @@
/* SerDes Control */
#define E1000_GEN_POLL_TIMEOUT 640
-/* FW Semaphore */
-#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
-#define E1000_FWSM_WLOCK_MAC_SHIFT 7
-
#endif /* _E1000_DEFINES_H_ */
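
For reference, the page/offset packing that the remaining "Bits... 15-5: page, 4-0: register offset" comment describes works out as below; this is a worked sketch assuming MAX_PHY_REG_ADDRESS is 0x1F, not new patch content:

	/* PHY_REG(page, reg) packs a 5-bit register offset under the page
	 * number, e.g. CV_SMB_CTRL == PHY_REG(769, 23):
	 *   (769 << 5) | (23 & 0x1F) == 0x6020 | 0x17 == 0x6037
	 */
	#define MAX_PHY_REG_ADDRESS	0x1F
	#define PHY_PAGE_SHIFT		5
	#define PHY_REG(page, reg)	(((page) << PHY_PAGE_SHIFT) | \
					 ((reg) & MAX_PHY_REG_ADDRESS))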
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 4b0bd9c225c..fcc758138b8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -95,70 +95,6 @@ struct e1000_info;
#define DEFAULT_JUMBO 9234
-/* BM/HV Specific Registers */
-#define BM_PORT_CTRL_PAGE 769
-
-#define PHY_UPPER_SHIFT 21
-#define BM_PHY_REG(page, reg) \
- (((reg) & MAX_PHY_REG_ADDRESS) |\
- (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
- (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
-
-/* PHY Wakeup Registers and defines */
-#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
-#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
-#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
-#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
-#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
-#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
-#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
-
-#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
-#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
-#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
-#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
-#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
-#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
-#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
-
-#define HV_STATS_PAGE 778
-#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
-#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
-#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
-#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
-#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
-#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
-#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
-#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
-#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
-#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
-#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
-#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
-#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
-#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
-
-#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
-
-/* BM PHY Copper Specific Status */
-#define BM_CS_STATUS 17
-#define BM_CS_STATUS_LINK_UP 0x0400
-#define BM_CS_STATUS_RESOLVED 0x0800
-#define BM_CS_STATUS_SPEED_MASK 0xC000
-#define BM_CS_STATUS_SPEED_1000 0x8000
-
-/* 82577 Mobile Phy Status Register */
-#define HV_M_STATUS 26
-#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
-#define HV_M_STATUS_SPEED_MASK 0x0300
-#define HV_M_STATUS_SPEED_1000 0x0200
-#define HV_M_STATUS_LINK_UP 0x0040
-
-#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
-#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
-
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
@@ -574,137 +510,6 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_es2_info;
-extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-
-extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
-
-extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
-extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
-
-extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
-extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
- bool state);
-extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
-extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
-extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
-extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
-extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
-
-extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
-extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
-extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
-extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
-extern s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
-extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
-extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
-extern s32 e1000e_setup_link_generic(struct e1000_hw *hw);
-extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
-extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
-extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count);
-extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
-extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
-extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
-extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
-extern void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
-extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
-extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
-extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
-extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
-extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
-extern void e1000e_reset_adaptive(struct e1000_hw *hw);
-extern void e1000e_update_adaptive(struct e1000_hw *hw);
-
-extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
-extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
-extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
-extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
-extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
-extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
-extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
-extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
- u16 *phy_reg);
-extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
- u16 *phy_reg);
-extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
-extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
-extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success);
-extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
-extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
-extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_check_downshift(struct e1000_hw *hw);
-extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
-extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
-extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
-
-extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
-extern bool e1000_check_phy_82574(struct e1000_hw *hw);
-extern s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
extern void e1000e_ptp_init(struct e1000_adapter *adapter);
extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
@@ -733,15 +538,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
return hw->phy.ops.write_reg_locked(hw, offset, data);
}
-extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
-extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
-extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
-extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
-extern void e1000e_release_nvm(struct e1000_hw *hw);
extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
-extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
@@ -776,10 +573,6 @@ static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
return hw->phy.ops.get_info(hw);
}
-extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
-extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
-extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
-
static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
{
return readl(hw->hw_addr + reg);
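
The BM_PHY_REG() macro removed from e1000.h above (presumably carried by the new phy.h added in this series) handles wakeup-area registers whose numbers exceed the 5-bit field; a worked sketch of the encoding, using the constants from the removed block (PHY_PAGE_SHIFT 5, PHY_UPPER_SHIFT 21, BM_WUC_PAGE 800):

	/* Sketch of the BM_PHY_REG() layout: low 5 register bits, then a
	 * 16-bit page, then any register bits above bit 4.
	 */
	static u32 bm_phy_reg(u32 page, u32 reg)
	{
		return (reg & 0x1F) |			/* bits 4-0: register offset */
		       ((page & 0xFFFF) << 5) |		/* bits 20-5: page */
		       ((reg & ~0x1F) << (21 - 5));	/* bit 21 up: upper register bits */
	}
	/* bm_phy_reg(800, 128) == 0x806400, i.e. BM_MTA(0) */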
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index c6c3e921686..2c1813737f6 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -130,7 +130,6 @@ static int e1000_get_settings(struct net_device *netdev,
u32 speed;
if (hw->phy.media_type == e1000_media_type_copper) {
-
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -328,12 +327,12 @@ static int e1000_set_settings(struct net_device *netdev,
}
/* reset the link */
-
if (netif_running(adapter->netdev)) {
e1000e_down(adapter);
e1000e_up(adapter);
- } else
+ } else {
e1000e_reset(adapter);
+ }
clear_bit(__E1000_RESETTING, &adapter->state);
return 0;
@@ -1355,7 +1354,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
/* Enable loopback on the PHY */
-#define I82577_PHY_LBK_CTRL 19
e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
break;
default:
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index f32b19af9a7..1e6b889aee8 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -29,347 +29,11 @@
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
+#include "regs.h"
#include "defines.h"
struct e1000_hw;
-enum e1e_registers {
- E1000_CTRL = 0x00000, /* Device Control - RW */
- E1000_STATUS = 0x00008, /* Device Status - RO */
- E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */
- E1000_EERD = 0x00014, /* EEPROM Read - RW */
- E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
- E1000_FLA = 0x0001C, /* Flash Access - RW */
- E1000_MDIC = 0x00020, /* MDI Control - RW */
- E1000_SCTL = 0x00024, /* SerDes Control - RW */
- E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
- E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
- E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
- E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
- E1000_FCT = 0x00030, /* Flow Control Type - RW */
- E1000_VET = 0x00038, /* VLAN Ether Type - RW */
- E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */
- E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */
- E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */
- E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
- E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
- E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
- E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
- E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
- E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */
- E1000_FEXTNVM7 = 0x000E4, /* Future Extended NVM 7 - RW */
- E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
-#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
- E1000_LPIC = 0x000FC, /* Low Power Idle Control - RW */
- E1000_RCTL = 0x00100, /* Rx Control - RW */
- E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
- E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
- E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */
- E1000_TCTL = 0x00400, /* Tx Control - RW */
- E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
- E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */
- E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
- E1000_LEDCTL = 0x00E00, /* LED Control - RW */
- E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
- E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
- E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
-#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
- E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
- E1000_PBS = 0x01008, /* Packet Buffer Size */
- E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
- E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
- E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
- E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
- E1000_PBA_ECC = 0x01100, /* PBA ECC Register */
- E1000_ERT = 0x02008, /* Early Rx Threshold - RW */
- E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
- E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
- E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
-/* Convenience macros
- *
- * Note: "_n" is the queue number of the register to be written to.
- *
- * Example usage:
- * E1000_RDBAL(current_rx_queue)
- */
- E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
-#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + (_n << 8))
- E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
-#define E1000_RDBAH(_n) (E1000_RDBAH_BASE + (_n << 8))
- E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
-#define E1000_RDLEN(_n) (E1000_RDLEN_BASE + (_n << 8))
- E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
-#define E1000_RDH(_n) (E1000_RDH_BASE + (_n << 8))
- E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
-#define E1000_RDT(_n) (E1000_RDT_BASE + (_n << 8))
- E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
- E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
-#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
- E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
-
- E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
- E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
-#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + (_n << 8))
- E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
-#define E1000_TDBAH(_n) (E1000_TDBAH_BASE + (_n << 8))
- E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
-#define E1000_TDLEN(_n) (E1000_TDLEN_BASE + (_n << 8))
- E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
-#define E1000_TDH(_n) (E1000_TDH_BASE + (_n << 8))
- E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
-#define E1000_TDT(_n) (E1000_TDT_BASE + (_n << 8))
- E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
- E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
-#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
- E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
- E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
-#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8))
- E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
- E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
- E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
- E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */
- E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */
- E1000_SCC = 0x04014, /* Single Collision Count - R/clr */
- E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */
- E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */
- E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */
- E1000_COLC = 0x04028, /* Collision Count - R/clr */
- E1000_DC = 0x04030, /* Defer Count - R/clr */
- E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */
- E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */
- E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */
- E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */
- E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */
- E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */
- E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */
- E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */
- E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
- E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
- E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
- E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
- E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
- E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
- E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
- E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */
- E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */
- E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */
- E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */
- E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */
- E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */
- E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */
- E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */
- E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */
- E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */
- E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */
- E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */
- E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */
- E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */
- E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */
- E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */
- E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */
- E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */
- E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */
- E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */
- E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */
- E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */
- E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
- E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
- E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
- E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
- E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
- E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
- E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */
- E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
- E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
- E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
- E1000_IAC = 0x04100, /* Interrupt Assertion Count */
- E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
- E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
- E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
- E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
- E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */
- E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
- E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
- E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
- E1000_PCS_LCTL = 0x04208, /* PCS Link Control - RW */
- E1000_PCS_LSTAT = 0x0420C, /* PCS Link Status - RO */
- E1000_PCS_ANADV = 0x04218, /* AN advertisement - RW */
- E1000_PCS_LPAB = 0x0421C, /* Link Partner Ability - RW */
- E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
- E1000_RFCTL = 0x05008, /* Receive Filter Control */
- E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
- E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
-#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8))
-#define E1000_RA (E1000_RAL(0))
- E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
-#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
- E1000_SHRAL_PCH_LPT_BASE = 0x05408,
-#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
- E1000_SHRAH_PCH_LTP_BASE = 0x0540C,
-#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8))
- E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */
-#define E1000_SHRAL(_n) (E1000_SHRAL_BASE + ((_n) * 8))
- E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */
-#define E1000_SHRAH(_n) (E1000_SHRAH_BASE + ((_n) * 8))
- E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
- E1000_WUC = 0x05800, /* Wakeup Control - RW */
- E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
- E1000_WUS = 0x05810, /* Wakeup Status - RO */
- E1000_MRQC = 0x05818, /* Multiple Receive Control - RW */
- E1000_MANC = 0x05820, /* Management Control - RW */
- E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */
- E1000_HOST_IF = 0x08800, /* Host Interface */
-
- E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
- E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
- E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
-#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
- E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
- E1000_GCR = 0x05B00, /* PCI-Ex Control */
- E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
- E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
- E1000_SWSM = 0x05B50, /* SW Semaphore */
- E1000_FWSM = 0x05B54, /* FW Semaphore */
- E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
- E1000_RETA_BASE = 0x05C00, /* Redirection Table - RW */
-#define E1000_RETA(_n) (E1000_RETA_BASE + ((_n) * 4))
- E1000_RSSRK_BASE = 0x05C80, /* RSS Random Key - RW */
-#define E1000_RSSRK(_n) (E1000_RSSRK_BASE + ((_n) * 4))
- E1000_FFLT_DBG = 0x05F04, /* Debug Register */
- E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
-#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
-#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE
- E1000_HICR = 0x08F00, /* Host Interface Control */
- E1000_SYSTIML = 0x0B600, /* System time register Low - RO */
- E1000_SYSTIMH = 0x0B604, /* System time register High - RO */
- E1000_TIMINCA = 0x0B608, /* Increment attributes register - RW */
- E1000_TSYNCTXCTL = 0x0B614, /* Tx Time Sync Control register - RW */
- E1000_TXSTMPL = 0x0B618, /* Tx timestamp value Low - RO */
- E1000_TXSTMPH = 0x0B61C, /* Tx timestamp value High - RO */
- E1000_TSYNCRXCTL = 0x0B620, /* Rx Time Sync Control register - RW */
- E1000_RXSTMPL = 0x0B624, /* Rx timestamp Low - RO */
- E1000_RXSTMPH = 0x0B628, /* Rx timestamp High - RO */
- E1000_RXMTRL = 0x0B634, /* Timesync Rx EtherType and Msg Type - RW */
- E1000_RXUDP = 0x0B638, /* Timesync Rx UDP Port - RW */
-};
-
-#define E1000_MAX_PHY_ADDR 4
-
-/* IGP01E1000 Specific Registers */
-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
-#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
-#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
-#define IGP_PAGE_SHIFT 5
-#define PHY_REG_MASK 0x1F
-
-#define BM_WUC_PAGE 800
-#define BM_WUC_ADDRESS_OPCODE 0x11
-#define BM_WUC_DATA_OPCODE 0x12
-#define BM_WUC_ENABLE_PAGE 769
-#define BM_WUC_ENABLE_REG 17
-#define BM_WUC_ENABLE_BIT (1 << 2)
-#define BM_WUC_HOST_WU_BIT (1 << 4)
-#define BM_WUC_ME_WU_BIT (1 << 5)
-
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
-
-#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
-#define IGP01E1000_PHY_POLARITY_MASK 0x0078
-
-#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
-
-#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
-
-#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
-#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
-#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
-
-#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
-
-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
-#define IGP01E1000_PSSR_MDIX 0x0800
-#define IGP01E1000_PSSR_SPEED_MASK 0xC000
-#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
-
-#define IGP02E1000_PHY_CHANNEL_NUM 4
-#define IGP02E1000_PHY_AGC_A 0x11B1
-#define IGP02E1000_PHY_AGC_B 0x12B1
-#define IGP02E1000_PHY_AGC_C 0x14B1
-#define IGP02E1000_PHY_AGC_D 0x18B1
-
-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */
-#define IGP02E1000_AGC_LENGTH_MASK 0x7F
-#define IGP02E1000_AGC_RANGE 15
-
-/* manage.c */
-#define E1000_VFTA_ENTRY_SHIFT 5
-#define E1000_VFTA_ENTRY_MASK 0x7F
-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
-
-#define E1000_HICR_EN 0x01 /* Enable bit - RO */
-/* Driver sets this bit when done to put command in RAM */
-#define E1000_HICR_C 0x02
-#define E1000_HICR_FW_RESET_ENABLE 0x40
-#define E1000_HICR_FW_RESET 0x80
-
-#define E1000_FWSM_MODE_MASK 0xE
-#define E1000_FWSM_MODE_SHIFT 1
-
-#define E1000_MNG_IAMT_MODE 0x3
-#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
-#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
-#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-
-/* nvm.c */
-#define E1000_STM_OPCODE 0xDB00
-
-#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
-#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
-#define E1000_KMRNCTRLSTA_REN 0x00200000
-#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
-#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
-#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
-#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
-#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
-#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
-#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
-#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
-
-#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
-#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
-#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
-#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
-
-/* IFE PHY Extended Status Control */
-#define IFE_PESC_POLARITY_REVERSED 0x0100
-
-/* IFE PHY Special Control */
-#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
-#define IFE_PSC_FORCE_POLARITY 0x0020
-
-/* IFE PHY Special Control and LED Control */
-#define IFE_PSCL_PROBE_MODE 0x0020
-#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
-#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
-
-/* IFE PHY MDIX Control */
-#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
-#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
-#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
-
-#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
-
#define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F
#define E1000_DEV_ID_82571EB_SERDES 0x1060
@@ -776,6 +440,11 @@ struct e1000_host_mng_command_info {
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
};
+#include "mac.h"
+#include "phy.h"
+#include "nvm.h"
+#include "manage.h"
+
/* Function pointers for the MAC. */
struct e1000_mac_operations {
s32 (*id_led_init)(struct e1000_hw *);
@@ -1005,4 +674,8 @@ struct e1000_hw {
} dev_spec;
};
+#include "82571.h"
+#include "80003es2lan.h"
+#include "ich8lan.h"
+
#endif
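
The split works because the new headers mostly carry constants and prototypes; a minimal illustration (not from the patch) of why the "struct e1000_hw;" forward declaration near the top of hw.h is enough for the headers included before the struct is fully defined:

	/* An incomplete type is sufficient for pointer parameters in
	 * prototypes, so mac.h, phy.h, nvm.h and manage.h can be pulled in
	 * before struct e1000_hw's full definition.
	 */
	struct e1000_hw;
	s32 e1000e_force_mac_fc(struct e1000_hw *hw);	/* compiles against the forward declaration */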
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a019b46f30e..dff7bff8b8e 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -57,132 +57,6 @@
#include "e1000.h"
-#define ICH_FLASH_GFPREG 0x0000
-#define ICH_FLASH_HSFSTS 0x0004
-#define ICH_FLASH_HSFCTL 0x0006
-#define ICH_FLASH_FADDR 0x0008
-#define ICH_FLASH_FDATA0 0x0010
-#define ICH_FLASH_PR0 0x0074
-
-#define ICH_FLASH_READ_COMMAND_TIMEOUT 500
-#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
-#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
-#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
-#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
-
-#define ICH_CYCLE_READ 0
-#define ICH_CYCLE_WRITE 2
-#define ICH_CYCLE_ERASE 3
-
-#define FLASH_GFPREG_BASE_MASK 0x1FFF
-#define FLASH_SECTOR_ADDR_SHIFT 12
-
-#define ICH_FLASH_SEG_SIZE_256 256
-#define ICH_FLASH_SEG_SIZE_4K 4096
-#define ICH_FLASH_SEG_SIZE_8K 8192
-#define ICH_FLASH_SEG_SIZE_64K 65536
-
-
-#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
-/* FW established a valid mode */
-#define E1000_ICH_FWSM_FW_VALID 0x00008000
-
-#define E1000_ICH_MNG_IAMT_MODE 0x2
-
-#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_OFF2 << 8) | \
- (ID_LED_DEF1_ON2 << 4) | \
- (ID_LED_DEF1_DEF2))
-
-#define E1000_ICH_NVM_SIG_WORD 0x13
-#define E1000_ICH_NVM_SIG_MASK 0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
-#define E1000_ICH_NVM_SIG_VALUE 0x80
-
-#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
-
-#define E1000_FEXTNVM_SW_CONFIG 1
-#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
-
-#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
-#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
-
-#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
-#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
-#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
-
-#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
-
-#define E1000_ICH_RAR_ENTRIES 7
-#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
-#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
-
-#define PHY_PAGE_SHIFT 5
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
- ((reg) & MAX_PHY_REG_ADDRESS))
-#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
-#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
-
-#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
-#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
-#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
-
-#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
-
-#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
-
-/* SMBus Control Phy Register */
-#define CV_SMB_CTRL PHY_REG(769, 23)
-#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
-
-/* SMBus Address Phy Register */
-#define HV_SMB_ADDR PHY_REG(768, 26)
-#define HV_SMB_ADDR_MASK 0x007F
-#define HV_SMB_ADDR_PEC_EN 0x0200
-#define HV_SMB_ADDR_VALID 0x0080
-#define HV_SMB_ADDR_FREQ_MASK 0x1100
-#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
-#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
-
-/* PHY Power Management Control */
-#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
-
-/* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
-#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
-#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
-#define I217_CGFREG PHY_REG(772, 29)
-#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
-#define I217_MEMPWR PHY_REG(772, 26)
-#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
-
-/* Strapping Option Register - RO */
-#define E1000_STRAP 0x0000C
-#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
-#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
-#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
-#define E1000_STRAP_SMT_FREQ_SHIFT 12
-
-/* OEM Bits Phy Register */
-#define HV_OEM_BITS PHY_REG(768, 25)
-#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
-#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
-#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
-
-#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
-#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
-
-/* KMRN Mode Control */
-#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
-#define HV_KMRN_MDIO_SLOW 0x0400
-
-/* KMRN FIFO Control and Status */
-#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
-
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -4117,7 +3991,6 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
* The SMBus release must also be disabled on LCD reset.
*/
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
-
/* Enable proxy to reset only on power good. */
e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
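
The surviving hunk shows the usual locked read-modify-write on a wakeup-page PHY register; a sketch of the full pattern, with the write-back assumed since it falls outside the context shown:

	u16 phy_reg;

	e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);	/* read current value */
	phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;	/* set the desired bit */
	e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);	/* write-back (assumed, not in the hunk) */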
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
new file mode 100644
index 00000000000..b6d3174d7d2
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -0,0 +1,268 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_ICH8LAN_H_
+#define _E1000E_ICH8LAN_H_
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_PR0 0x0074
+
+/* Requires up to 10 seconds when MNG might be accessing part. */
+#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
+
+#define ICH_CYCLE_READ 0
+#define ICH_CYCLE_WRITE 2
+#define ICH_CYCLE_ERASE 3
+
+#define FLASH_GFPREG_BASE_MASK 0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT 12
+
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_8K 8192
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
+#define E1000_ICH_MNG_IAMT_MODE 0x2
+
+#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT 7
+
+/* Shared Receive Address Registers */
+#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
+#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_OFF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
+#define E1000_ICH_NVM_SIG_VALUE 0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+
+#define E1000_FEXTNVM_SW_CONFIG 1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
+
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
+#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES 7
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+ ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
+#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+
+#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
+#define HV_STATS_PAGE 778
+/* Half-duplex collision counts */
+#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */
+#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */
+#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */
+#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */
+#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
+#define HV_SMB_ADDR_PEC_EN 0x0200
+#define HV_SMB_ADDR_VALID 0x0080
+#define HV_SMB_ADDR_FREQ_MASK 0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP 0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT 12
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL PHY_REG(770, 17)
+#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_100_ENABLE 0x2000
+#define I82579_LPI_CTRL_1000_ENABLE 0x4000
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
+
+/* Extended Management Interface (EMI) Registers */
+#define I82579_EMI_ADDR 0x10
+#define I82579_EMI_DATA 0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
+#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
+#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
+#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */
+#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
+#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
+#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
+#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
+#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
+#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
+#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
+#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
+
+#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Rx LP idle received */
+#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Tx LP idle received */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
+#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
+#define I217_CGFREG PHY_REG(772, 29)
+#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
+#define I217_MEMPWR PHY_REG(772, 26)
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
+
+/* Receive Address Initial CRC Calculation */
+#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
+
+/* Latency Tolerance Reporting */
+#define E1000_LTRV 0x000F8
+#define E1000_LTRV_SCALE_MAX 5
+#define E1000_LTRV_SCALE_FACTOR 5
+#define E1000_LTRV_REQ_SHIFT 15
+#define E1000_LTRV_NOSNOOP_SHIFT 16
+#define E1000_LTRV_SEND (1 << 30)
+
+/* Proprietary Latency Tolerance Reporting PCI Capability */
+#define E1000_PCI_LTR_CAP_LPT 0xA8
+
+/* OBFF Control & Threshold Defines */
+#define E1000_SVCR_OFF_EN 0x00000001
+#define E1000_SVCR_OFF_MASKINT 0x00001000
+#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000
+#define E1000_SVCR_OFF_TIMER_SHIFT 16
+#define E1000_SVT_OFF_HWM_MASK 0x0000001F
+
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state);
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
+#endif /* _E1000E_ICH8LAN_H_ */
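
The new E1000_SHRAL_PCH_LPT/E1000_SHRAH_PCH_LPT macros address the shared receive-address pairs counted by E1000_PCH_LPT_RAR_ENTRIES; a short sketch (not from the patch) of the offsets they generate:

	/* Each shared receive-address entry is an 8-byte low/high pair;
	 * RAR[0] plus SHRA[0-10] gives the 12 E1000_PCH_LPT_RAR_ENTRIES.
	 */
	static u32 shral_pch_lpt(unsigned int i)
	{
		return 0x05408 + i * 8;		/* E1000_SHRAL_PCH_LPT(i): 0x05408, 0x05410, ... */
	}

	static u32 shrah_pch_lpt(unsigned int i)
	{
		return 0x0540C + i * 8;		/* E1000_SHRAH_PCH_LPT(i): 0x0540C, 0x05414, ... */
	}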
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
new file mode 100644
index 00000000000..a61fee404eb
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -0,0 +1,74 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_MAC_H_
+#define _E1000E_MAC_H_
+
+s32 e1000e_blink_led_generic(struct e1000_hw *hw);
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
+s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
+s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
+s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
+s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
+s32 e1000e_force_mac_fc(struct e1000_hw *hw);
+s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
+s32 e1000e_led_on_generic(struct e1000_hw *hw);
+s32 e1000e_led_off_generic(struct e1000_hw *hw);
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
+s32 e1000e_setup_led_generic(struct e1000_hw *hw);
+s32 e1000e_setup_link_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
+
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void e1000e_put_hw_semaphore(struct e1000_hw *hw);
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000e_reset_adaptive(struct e1000_hw *hw);
+void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+void e1000e_update_adaptive(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 4dae0dbda83..e4b0f1ef92f 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -28,19 +28,6 @@
#include "e1000.h"
-enum e1000_mng_mode {
- e1000_mng_mode_none = 0,
- e1000_mng_mode_asf,
- e1000_mng_mode_pt,
- e1000_mng_mode_ipmi,
- e1000_mng_mode_host_if_only
-};
-
-#define E1000_FACTPS_MNGCG 0x20000000
-
-/* Intel(R) Active Management Technology signature */
-#define E1000_IAMT_SIGNATURE 0x544D4149
-
/**
* e1000_calculate_checksum - Calculate checksum for buffer
* @buffer: pointer to EEPROM
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
new file mode 100644
index 00000000000..326897c29ea
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_MANAGE_H_
+#define _E1000E_MANAGE_H_
+
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#define E1000_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C 0x02
+#define E1000_HICR_SV 0x04 /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET 0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+#endif
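
E1000_IAMT_SIGNATURE is simply the ASCII string "IAMT" stored little-endian; a standalone userspace sketch (for illustration only) that decodes it:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t sig = 0x544D4149;	/* E1000_IAMT_SIGNATURE */
		char buf[5];
		int i;

		for (i = 0; i < 4; i++)		/* bytes: 0x49 'I', 0x41 'A', 0x4D 'M', 0x54 'T' */
			buf[i] = (sig >> (8 * i)) & 0xFF;
		buf[4] = '\0';
		printf("%s\n", buf);		/* prints "IAMT" */
		return 0;
	}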
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e0eb5dddf3e..a177b8b65c4 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -86,20 +86,7 @@ struct e1000_reg_info {
char *name;
};
-#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
-
-#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
-
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
-
/* General Registers */
{E1000_CTRL, "CTRL"},
{E1000_STATUS, "STATUS"},
@@ -2024,7 +2011,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
/* Auto-Mask Other interrupts upon ICR read */
-#define E1000_EIAC_MASK_82574 0x01F00000
ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
ctrl_ext |= E1000_CTRL_EXT_EIAME;
ew32(CTRL_EXT, ctrl_ext);
@@ -4844,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work)
&adapter->link_speed,
&adapter->link_duplex);
e1000_print_link_info(adapter);
+
+ /* check if SmartSpeed worked */
+ e1000e_check_downshift(hw);
+ if (phy->speed_downgraded)
+ netdev_warn(netdev,
+ "Link Speed was downgraded by SmartSpeed\n");
+
/* On supported PHYs, check for duplex mismatch only
* if link has autonegotiated at 10/100 half
*/
@@ -6407,7 +6400,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
*/
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
-
}
static void e1000_print_device_info(struct e1000_adapter *adapter)

diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
new file mode 100644
index 00000000000..45fc6956162
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -0,0 +1,47 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_NVM_H_
+#define _E1000E_NVM_H_
+
+s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000e_release_nvm(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE 0xDB00
+
+#endif
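
A hedged sketch of how these helpers could be sequenced for a single-word rewrite; in the driver they are normally reached through hw->nvm.ops, and NVM access synchronization plus most error handling are omitted here:

/* Not a real driver path; shown only to illustrate sequencing. */
static s32 example_rewrite_nvm_word(struct e1000_hw *hw, u16 offset, u16 value)
{
	s32 ret_val;

	/* NVM synchronization (e1000e_acquire_nvm/e1000e_release_nvm) omitted */
	ret_val = e1000e_validate_nvm_checksum_generic(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1000e_write_nvm_spi(hw, offset, 1, &value);
	if (ret_val)
		return ret_val;

	/* keep the checksum word consistent with the new contents */
	return e1000e_update_nvm_checksum_generic(hw);
}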
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index ae656f16c9f..0930c136aa3 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -53,48 +53,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_igp_2_cable_length_table)
-#define BM_PHY_REG_PAGE(offset) \
- ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
-#define BM_PHY_REG_NUM(offset) \
- ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
- (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
- ~MAX_PHY_REG_ADDRESS)))
-
-#define HV_INTC_FC_PAGE_START 768
-#define I82578_ADDR_REG 29
-#define I82577_ADDR_REG 16
-#define I82577_CFG_REG 22
-#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
-#define I82577_CTRL_REG 23
-
-/* 82577 specific PHY registers */
-#define I82577_PHY_CTRL_2 18
-#define I82577_PHY_STATUS_2 26
-#define I82577_PHY_DIAG_STATUS 31
-
-/* I82577 PHY Status 2 */
-#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
-#define I82577_PHY_STATUS2_MDIX 0x0800
-#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
-#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
-
-/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
-#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
-#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
-
-/* I82577 PHY Diagnostics Status */
-#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
-#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
-
-/* BM PHY Copper Specific Control 1 */
-#define BM_CS_CTRL1 16
-
-#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
-#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
-#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
-
/**
* e1000e_check_reset_block_generic - Check if PHY reset is blocked
* @hw: pointer to the HW structure
@@ -2516,7 +2474,6 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
hw->phy.addr = 1;
if (offset > MAX_PHY_MULTI_PAGE_REG) {
-
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
page);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
new file mode 100644
index 00000000000..f4f71b9991e
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -0,0 +1,242 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_PHY_H_
+#define _E1000E_PHY_H_
+
+s32 e1000e_check_downshift(struct e1000_hw *hw);
+s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw);
+s32 e1000e_get_phy_id(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
+void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
+s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000e_setup_copper_link(struct e1000_hw *hw);
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR 8
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE 769
+#define BM_WUC_PAGE 800
+#define BM_WUC_ADDRESS_OPCODE 0x11
+#define BM_WUC_DATA_OPCODE 0x12
+#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
+#define BM_WUC_ENABLE_REG 17
+#define BM_WUC_ENABLE_BIT (1 << 2)
+#define BM_WUC_HOST_WU_BIT (1 << 4)
+#define BM_WUC_ME_WU_BIT (1 << 5)
+
+#define PHY_UPPER_SHIFT 21
+#define BM_PHY_REG(page, reg) \
+ (((reg) & MAX_PHY_REG_ADDRESS) |\
+ (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+ (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+#define BM_PHY_REG_PAGE(offset) \
+ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+ ~MAX_PHY_REG_ADDRESS)))
+
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CTRL_REG 23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_LBK_CTRL 19
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1 16
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS 17
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS 26
+#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
+#define HV_M_STATUS_SPEED_MASK 0x0300
+#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_LINK_UP 0x0040
+
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+#endif
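
The BM_PHY_REG*() macros pack a page number and a register number into one offset and unpack them again. A standalone round-trip check, assuming MAX_PHY_REG_ADDRESS is 0x1F and PHY_PAGE_SHIFT is 5 as defined elsewhere in the driver headers (only PHY_UPPER_SHIFT and the macros come from the file above):

#include <stdio.h>
#include <stdint.h>

#define MAX_PHY_REG_ADDRESS	0x1F	/* assumed from the driver's other headers */
#define PHY_PAGE_SHIFT		5	/* assumed from the driver's other headers */
#define PHY_UPPER_SHIFT		21

#define BM_PHY_REG(page, reg) \
	(((reg) & MAX_PHY_REG_ADDRESS) |\
	 (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
	 (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
#define BM_PHY_REG_PAGE(offset) \
	((uint16_t)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
#define BM_PHY_REG_NUM(offset) \
	((uint16_t)(((offset) & MAX_PHY_REG_ADDRESS) |\
	 (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
	 ~MAX_PHY_REG_ADDRESS)))

int main(void)
{
	unsigned int off = BM_PHY_REG(800, 0x11); /* BM_WUC_PAGE, BM_WUC_ADDRESS_OPCODE */

	/* prints: offset=0x6411 page=800 reg=0x11 */
	printf("offset=0x%x page=%u reg=0x%x\n", off,
	       (unsigned int)BM_PHY_REG_PAGE(off),
	       (unsigned int)BM_PHY_REG_NUM(off));
	return 0;
}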
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
new file mode 100644
index 00000000000..794fe149766
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -0,0 +1,252 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_REGS_H_
+#define _E1000E_REGS_H_
+
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High - RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
+#define E1000_SVCR 0x000F0
+#define E1000_SVT 0x000F4
+#define E1000_LPIC 0x000FC /* Low Power IDLE control */
+#define E1000_RCTL 0x00100 /* Rx Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
+#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */
+#define E1000_TCTL 0x00400 /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define E1000_TIPG 0x00410 /* Tx Inter-packet gap - RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + (((_i) - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + (((_i) - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count - R/clr */
+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
+
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
+#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_SWSM2 0x05B58
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
+#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
+
+#endif
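
The per-queue convenience macros switch register banks at queue 4 (0x100 stride below queue 4, 0x40 stride above). A standalone check of that arithmetic using E1000_RDT exactly as defined above:

#include <stdio.h>

/* E1000_RDT copied from regs.h above; the other queue macros follow the same pattern */
#define E1000_RDT(_n)	((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
			 (0x0C018 + ((_n) * 0x40)))

int main(void)
{
	/* queues 0-3 sit in the legacy 0x02800 block, queue 4 and up in the 0x0C000 block */
	printf("RDT(0)=%#x RDT(3)=%#x RDT(4)=%#x\n",
	       (unsigned int)E1000_RDT(0), (unsigned int)E1000_RDT(3),
	       (unsigned int)E1000_RDT(4));
	/* prints: RDT(0)=0x2818 RDT(3)=0x2b18 RDT(4)=0xc118 */
	return 0;
}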
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 54a7c20d9fa..84e7e0909de 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -111,184 +111,168 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
return ext_mdio;
}
-static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+/**
+ * igb_init_phy_params_82575 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- struct e1000_nvm_info *nvm = &hw->nvm;
- struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
- u32 eecd;
- s32 ret_val;
- u16 size;
- u32 ctrl_ext = 0;
+ s32 ret_val = 0;
+ u32 ctrl_ext;
- switch (hw->device_id) {
- case E1000_DEV_ID_82575EB_COPPER:
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- mac->type = e1000_82575;
- break;
- case E1000_DEV_ID_82576:
- case E1000_DEV_ID_82576_NS:
- case E1000_DEV_ID_82576_NS_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- case E1000_DEV_ID_82576_SERDES_QUAD:
- mac->type = e1000_82576;
- break;
- case E1000_DEV_ID_82580_COPPER:
- case E1000_DEV_ID_82580_FIBER:
- case E1000_DEV_ID_82580_QUAD_FIBER:
- case E1000_DEV_ID_82580_SERDES:
- case E1000_DEV_ID_82580_SGMII:
- case E1000_DEV_ID_82580_COPPER_DUAL:
- case E1000_DEV_ID_DH89XXCC_SGMII:
- case E1000_DEV_ID_DH89XXCC_SERDES:
- case E1000_DEV_ID_DH89XXCC_BACKPLANE:
- case E1000_DEV_ID_DH89XXCC_SFP:
- mac->type = e1000_82580;
- break;
- case E1000_DEV_ID_I350_COPPER:
- case E1000_DEV_ID_I350_FIBER:
- case E1000_DEV_ID_I350_SERDES:
- case E1000_DEV_ID_I350_SGMII:
- mac->type = e1000_i350;
- break;
- case E1000_DEV_ID_I210_COPPER:
- case E1000_DEV_ID_I210_COPPER_OEM1:
- case E1000_DEV_ID_I210_COPPER_IT:
- case E1000_DEV_ID_I210_FIBER:
- case E1000_DEV_ID_I210_SERDES:
- case E1000_DEV_ID_I210_SGMII:
- mac->type = e1000_i210;
- break;
- case E1000_DEV_ID_I211_COPPER:
- mac->type = e1000_i211;
- break;
- default:
- return -E1000_ERR_MAC_INIT;
- break;
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
}
- /* Set media type */
- /*
- * The 82575 uses bits 22:23 for link mode. The mode can be changed
- * based on the EEPROM. We cannot rely upon device ID. There
- * is no distinguishable difference between fiber and internal
- * SerDes mode on the 82575. There can be an external PHY attached
- * on the SGMII interface. For this, we'll set sgmii_active to true.
- */
- phy->media_type = e1000_media_type_copper;
- dev_spec->sgmii_active = false;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
ctrl_ext = rd32(E1000_CTRL_EXT);
- switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
- case E1000_CTRL_EXT_LINK_MODE_SGMII:
- dev_spec->sgmii_active = true;
- break;
- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
- hw->phy.media_type = e1000_media_type_internal_serdes;
- break;
- default:
- break;
+
+ if (igb_sgmii_active_82575(hw)) {
+ phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+ phy->ops.reset = igb_phy_hw_reset;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
}
- /* Set mta register count */
- mac->mta_reg_count = 128;
- /* Set rar entry count */
- switch (mac->type) {
- case e1000_82576:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+ igb_reset_mdicnfg_82580(hw);
+
+ if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+ phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
+ } else {
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ phy->ops.read_reg = igb_read_phy_reg_82580;
+ phy->ops.write_reg = igb_write_phy_reg_82580;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ phy->ops.read_reg = igb_read_phy_reg_gs40g;
+ phy->ops.write_reg = igb_write_phy_reg_gs40g;
+ break;
+ default:
+ phy->ops.read_reg = igb_read_phy_reg_igp;
+ phy->ops.write_reg = igb_write_phy_reg_igp;
+ }
+ }
+
+ /* set lan id */
+ hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+ E1000_STATUS_FUNC_SHIFT;
+
+ /* Set phy->phy_addr and phy->id. */
+ ret_val = igb_get_phy_id_82575(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ phy->type = e1000_phy_m88;
+ phy->ops.get_phy_info = igb_get_phy_info_m88;
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID)
+ phy->ops.get_cable_length =
+ igb_get_cable_length_m88_gen2;
+ else
+ phy->ops.get_cable_length = igb_get_cable_length_m88;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
- case e1000_82580:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ case IGP03E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->ops.get_phy_info = igb_get_phy_info_igp;
+ phy->ops.get_cable_length = igb_get_cable_length_igp_2;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
break;
- case e1000_i350:
- mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+ case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
+ phy->type = e1000_phy_82580;
+ phy->ops.force_speed_duplex =
+ igb_phy_force_speed_duplex_82580;
+ phy->ops.get_cable_length = igb_get_cable_length_82580;
+ phy->ops.get_phy_info = igb_get_phy_info_82580;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
break;
- default:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+ case I210_I_PHY_ID:
+ phy->type = e1000_phy_i210;
+ phy->ops.check_polarity = igb_check_polarity_m88;
+ phy->ops.get_phy_info = igb_get_phy_info_m88;
+ phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
}
- /* reset */
- if (mac->type >= e1000_82580)
- mac->ops.reset_hw = igb_reset_hw_82580;
- else
- mac->ops.reset_hw = igb_reset_hw_82575;
- if (mac->type >= e1000_i210) {
- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
- mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
- } else {
- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
- mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
- }
+out:
+ return ret_val;
+}
- /* Set if part includes ASF firmware */
- mac->asf_firmware_present = true;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid =
- (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
- /* enable EEE on i350 parts and later parts */
- if (mac->type >= e1000_i350)
- dev_spec->eee_disable = false;
- else
- dev_spec->eee_disable = true;
- /* physical interface link setup */
- mac->ops.setup_physical_interface =
- (hw->phy.media_type == e1000_media_type_copper)
- ? igb_setup_copper_link_82575
- : igb_setup_serdes_link_82575;
+/**
+ * igb_init_nvm_params_82575 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = rd32(E1000_EECD);
+ u16 size;
- /* NVM initialization */
- eecd = rd32(E1000_EECD);
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
-
- /*
- * Added to a constant, "size" becomes the left-shift value
+ /* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
- /*
- * Check for invalid size
+ /* Just in case size is out of range, cap it to the largest
+ * EEPROM size supported
*/
- if ((hw->mac.type == e1000_82576) && (size > 15)) {
- pr_notice("The NVM size is not valid, defaulting to 32K\n");
+ if (size > 15)
size = 15;
- }
nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) {
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+
switch (nvm->override) {
case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
+ nvm->page_size = 32;
nvm->address_bits = 16;
break;
case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
+ nvm->page_size = 8;
nvm->address_bits = 8;
break;
default:
- nvm->page_size = eecd
- & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd
- & E1000_EECD_ADDR_BITS ? 16 : 8;
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
break;
}
if (nvm->word_size == (1 << 15))
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
- } else
+ } else {
nvm->type = e1000_nvm_flash_hw;
+ }
/* NVM Function Pointers */
switch (hw->mac.type) {
@@ -345,118 +329,176 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
- /* if part supports SR-IOV then initialize mailbox parameters */
+ return 0;
+}
+
+/**
+ * igb_init_mac_params_82575 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
switch (mac->type) {
case e1000_82576:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ break;
+ case e1000_82580:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ break;
case e1000_i350:
- igb_init_mbx_params_pf(hw);
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
break;
default:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
break;
}
+ /* reset */
+ if (mac->type >= e1000_82580)
+ mac->ops.reset_hw = igb_reset_hw_82580;
+ else
+ mac->ops.reset_hw = igb_reset_hw_82575;
- /* setup PHY parameters */
- if (phy->media_type != e1000_media_type_copper) {
- phy->type = e1000_phy_none;
- return 0;
- }
-
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- phy->reset_delay_us = 100;
-
- ctrl_ext = rd32(E1000_CTRL_EXT);
+ if (mac->type >= e1000_i210) {
+ mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
+ mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
- /* PHY function pointers */
- if (igb_sgmii_active_82575(hw)) {
- phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
- ctrl_ext |= E1000_CTRL_I2C_ENA;
} else {
- phy->ops.reset = igb_phy_hw_reset;
- ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
+ mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
}
- wr32(E1000_CTRL_EXT, ctrl_ext);
- igb_reset_mdicnfg_82580(hw);
-
- if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
- } else if ((hw->mac.type == e1000_82580)
- || (hw->mac.type == e1000_i350)) {
- phy->ops.read_reg = igb_read_phy_reg_82580;
- phy->ops.write_reg = igb_write_phy_reg_82580;
- } else if (hw->phy.type >= e1000_phy_i210) {
- phy->ops.read_reg = igb_read_phy_reg_gs40g;
- phy->ops.write_reg = igb_write_phy_reg_gs40g;
- } else {
- phy->ops.read_reg = igb_read_phy_reg_igp;
- phy->ops.write_reg = igb_write_phy_reg_igp;
- }
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* Set if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
+ /* enable EEE on i350 parts and later parts */
+ if (mac->type >= e1000_i350)
+ dev_spec->eee_disable = false;
+ else
+ dev_spec->eee_disable = true;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? igb_setup_copper_link_82575
+ : igb_setup_serdes_link_82575;
- /* set lan id */
- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
- E1000_STATUS_FUNC_SHIFT;
+ return 0;
+}
- /* Set phy->phy_addr and phy->id. */
- ret_val = igb_get_phy_id_82575(hw);
- if (ret_val)
- return ret_val;
+static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ s32 ret_val;
+ u32 ctrl_ext = 0;
- /* Verify phy id and set remaining function pointers */
- switch (phy->id) {
- case I347AT4_E_PHY_ID:
- case M88E1112_E_PHY_ID:
- case M88E1111_I_PHY_ID:
- phy->type = e1000_phy_m88;
- phy->ops.get_phy_info = igb_get_phy_info_m88;
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ mac->type = e1000_82575;
+ break;
+ case E1000_DEV_ID_82576:
+ case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ case E1000_DEV_ID_82576_SERDES_QUAD:
+ mac->type = e1000_82576;
+ break;
+ case E1000_DEV_ID_82580_COPPER:
+ case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
+ case E1000_DEV_ID_82580_SERDES:
+ case E1000_DEV_ID_82580_SGMII:
+ case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ mac->type = e1000_82580;
+ break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ mac->type = e1000_i350;
+ break;
+ case E1000_DEV_ID_I210_COPPER:
+ case E1000_DEV_ID_I210_COPPER_OEM1:
+ case E1000_DEV_ID_I210_COPPER_IT:
+ case E1000_DEV_ID_I210_FIBER:
+ case E1000_DEV_ID_I210_SERDES:
+ case E1000_DEV_ID_I210_SGMII:
+ mac->type = e1000_i210;
+ break;
+ case E1000_DEV_ID_I211_COPPER:
+ mac->type = e1000_i211;
+ break;
+ default:
+ return -E1000_ERR_MAC_INIT;
+ break;
+ }
- if (phy->id == I347AT4_E_PHY_ID ||
- phy->id == M88E1112_E_PHY_ID)
- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
- else
- phy->ops.get_cable_length = igb_get_cable_length_m88;
+ /* Set media type */
+ /*
+ * The 82575 uses bits 22:23 for link mode. The mode can be changed
+ * based on the EEPROM. We cannot rely upon device ID. There
+ * is no distinguishable difference between fiber and internal
+ * SerDes mode on the 82575. There can be an external PHY attached
+ * on the SGMII interface. For this, we'll set sgmii_active to true.
+ */
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = false;
- if (phy->id == I210_I_PHY_ID) {
- phy->ops.get_cable_length =
- igb_get_cable_length_m88_gen2;
- phy->ops.set_d0_lplu_state =
- igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state =
- igb_set_d3_lplu_state_82580;
- }
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ dev_spec->sgmii_active = true;
break;
- case IGP03E1000_E_PHY_ID:
- phy->type = e1000_phy_igp_3;
- phy->ops.get_phy_info = igb_get_phy_info_igp;
- phy->ops.get_cable_length = igb_get_cable_length_igp_2;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
break;
- case I82580_I_PHY_ID:
- case I350_I_PHY_ID:
- phy->type = e1000_phy_82580;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
- phy->ops.get_cable_length = igb_get_cable_length_82580;
- phy->ops.get_phy_info = igb_get_phy_info_82580;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+ default:
break;
- case I210_I_PHY_ID:
- phy->type = e1000_phy_i210;
- phy->ops.get_phy_info = igb_get_phy_info_m88;
- phy->ops.check_polarity = igb_check_polarity_m88;
- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ }
+
+ /* mac initialization and operations */
+ ret_val = igb_init_mac_params_82575(hw);
+ if (ret_val)
+ goto out;
+
+ /* NVM initialization */
+ ret_val = igb_init_nvm_params_82575(hw);
+ if (ret_val)
+ goto out;
+
+ /* if part supports SR-IOV then initialize mailbox parameters */
+ switch (mac->type) {
+ case e1000_82576:
+ case e1000_i350:
+ igb_init_mbx_params_pf(hw);
break;
default:
- return -E1000_ERR_PHY;
+ break;
}
- return 0;
+ /* setup PHY parameters */
+ ret_val = igb_init_phy_params_82575(hw);
+
+out:
+ return ret_val;
}
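
igb_init_nvm_params_82575() turns the EECD size field into a word count with a shift and caps out-of-range values at 32K words. A standalone illustration of that arithmetic, assuming NVM_WORD_SIZE_BASE_SHIFT is 6 as in the driver's defines (not shown in this patch):

#include <stdio.h>

#define NVM_WORD_SIZE_BASE_SHIFT 6	/* assumed from the driver's defines */

static unsigned int nvm_words(unsigned int size_field)
{
	unsigned int size = size_field + NVM_WORD_SIZE_BASE_SHIFT;

	if (size > 15)		/* same cap as igb_init_nvm_params_82575() */
		size = 15;
	return 1u << size;
}

int main(void)
{
	/* a size field of 4 gives 1024 words; an out-of-range field caps at 32768 words */
	printf("%u %u\n", nvm_words(4), nvm_words(12));
	return 0;
}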
/**
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 4b78053592b..d27edbc6392 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -139,8 +139,6 @@ struct vf_data_storage {
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -169,6 +167,17 @@ enum igb_tx_flags {
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
+/*
+ * The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct igb_tx_buffer {
@@ -275,10 +284,18 @@ struct igb_q_vector {
enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+ IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
+#define ring_uses_build_skb(ring) \
+ test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+ set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+ clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \
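
TXD_USE_COUNT() and IGB_MAX_DATA_PER_TXD above bound how many descriptors a buffer needs: anything up to 32K fits in one descriptor, larger buffers are split. A standalone check of that arithmetic (DIV_ROUND_UP is open-coded here):

#include <stdio.h>

#define IGB_MAX_TXD_PWR		15
#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)

int main(void)
{
	/* a 1500-byte linear area needs one descriptor; a 60000-byte one needs two */
	printf("%d %d\n", TXD_USE_COUNT(1500), TXD_USE_COUNT(60000));
	return 0;
}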
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 40b5d568d80..a3830a8ba4c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1891,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(&adapter->hw);
if (hw->mac.autoneg)
- msleep(4000);
+ msleep(5000);
if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
*data = 1;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1aaf1935186..ed79a1c53b5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3354,6 +3354,20 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring)
+{
+#define IGB_MAX_BUILD_SKB_SIZE \
+ (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
+ (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
+
+ /* set build_skb flag */
+ if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
+ set_ring_build_skb_enabled(rx_ring);
+ else
+ clear_ring_build_skb_enabled(rx_ring);
+}
+
/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
@@ -3373,8 +3387,11 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *rx_ring = adapter->rx_ring[i];
+ igb_set_rx_buffer_len(adapter, rx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+ }
}
/**
@@ -4417,13 +4434,6 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-/*
- * The largest size we can write to the descriptor is 65535. In order to
- * maintain a power of two alignment we have to limit ourselves to 32K.
- */
-#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
-
static void igb_tx_map(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
const u8 hdr_len)
@@ -4592,15 +4602,25 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
- /* need: 1 descriptor per page,
+ /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
- * + 1 desc for skb->data,
* + 1 desc for context descriptor,
- * otherwise try next time */
- if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+ * otherwise try next time
+ */
+ if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
+ unsigned short f;
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ } else {
+ count += skb_shinfo(skb)->nr_frags;
+ }
+
+ if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
return NETDEV_TX_BUSY;
}
@@ -4642,7 +4662,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
igb_tx_map(tx_ring, first, hdr_len);
/* Make sure there is space in the ring for the next send. */
- igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
@@ -6046,9 +6066,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
}
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets &&
netif_carrier_ok(tx_ring->netdev) &&
- igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -6097,6 +6118,41 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
DMA_FROM_DEVICE);
}
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+ struct page *page,
+ unsigned int truesize)
+{
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+ /* since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+ atomic_set(&page->_count, 2);
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+#endif
+
+ return true;
+}
+
/**
* igb_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -6119,6 +6175,11 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
{
struct page *page = rx_buffer->page;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
unsigned char *va = page_address(page) + rx_buffer->page_offset;
@@ -6141,38 +6202,88 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+ rx_buffer->page_offset, size, truesize);
- /* avoid re-using remote pages */
- if (unlikely(page_to_nid(page) != numa_node_id()))
- return false;
+ return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+}
+static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
+{
+ struct igb_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+ void *page_addr;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(NET_SKB_PAD +
+ NET_IP_ALIGN +
+ size);
+#endif
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+ /* If we spanned a buffer we have a huge mess so test for it */
+ BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
- /*
- * since we are the only owner of the page and we need to
- * increment it, just set the value to 2 in order to avoid
- * an unnecessary locked operation
- */
- atomic_set(&page->_count, 2);
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+ /* Guarantee this function can be used by verifying buffer sizes */
+ BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
+ NET_IP_ALIGN +
+ IGB_TS_HDR_LEN +
+ ETH_FRAME_LEN +
+ ETH_FCS_LEN));
- if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
- return false;
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
- /* bump ref count on page before it is given to the stack */
- get_page(page);
+ page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
#endif
- return true;
+ /* build an skb around the page buffer */
+ skb = build_skb(page_addr, truesize);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* pull timestamp out of packet data */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+ __skb_pull(skb, IGB_TS_HDR_LEN);
+ }
+
+ if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->dma = 0;
+ rx_buffer->page = NULL;
+
+ return skb;
}
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
@@ -6184,13 +6295,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
-
page = rx_buffer->page;
prefetchw(page);
@@ -6590,8 +6694,17 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
break;
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
/* retrieve a buffer from the ring */
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ if (ring_uses_build_skb(rx_ring))
+ skb = igb_build_rx_buffer(rx_ring, rx_desc);
+ else
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -6678,6 +6791,14 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
}
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ if (ring_uses_build_skb(rx_ring))
+ return NET_SKB_PAD + NET_IP_ALIGN;
+ else
+ return 0;
+}
+
/**
* igb_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
@@ -6704,7 +6825,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
+ bi->page_offset +
+ igb_rx_offset(rx_ring));
rx_desc++;
bi++;
@@ -7608,7 +7731,7 @@ static DEFINE_SPINLOCK(i2c_clients_lock);
* @adapter: adapter struct
* @dev_addr: device address of i2c needed.
*/
-struct i2c_client *
+static struct i2c_client *
igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
{
ulong flags;
@@ -7631,13 +7754,8 @@ igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
}
}
- /* no client_list found, create a new one as long as
- * irqs are not disabled
- */
- if (unlikely(irqs_disabled()))
- goto exit;
-
- client_list = kzalloc(sizeof(*client_list), GFP_KERNEL);
+ /* no client_list found, create a new one */
+ client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
if (client_list == NULL)
goto exit;
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index fdca7b67277..a1463e3d14c 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -127,8 +127,8 @@ struct igbvf_buffer {
/* Tx */
struct {
unsigned long time_stamp;
+ union e1000_adv_tx_desc *next_to_watch;
u16 length;
- u16 next_to_watch;
u16 mapped_as_page;
};
/* Rx */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index f53f7136e50..d60cd439341 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int i, eop, count = 0;
+ unsigned int i, count = 0;
bool cleaned = false;
i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+ buffer_info = &tx_ring->buffer_info[i];
+ eop_desc = buffer_info->next_to_watch;
+
+ do {
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ buffer_info->next_to_watch = NULL;
- while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
- rmb(); /* read buffer_info after eop_desc status */
for (cleaned = false; !cleaned; count++) {
tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
- cleaned = (i == eop);
+ cleaned = (tx_desc == eop_desc);
skb = buffer_info->skb;
if (skb) {
@@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
i++;
if (i == tx_ring->count)
i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
}
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
- }
+
+ eop_desc = buffer_info->next_to_watch;
+ } while (count < tx_ring->count);
tx_ring->next_to_clean = i;
@@ -1961,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
context_desc->seqnum_seed = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2021,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
context_desc->mss_l4len_idx = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2061,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- struct sk_buff *skb,
- unsigned int first)
+ struct sk_buff *skb)
{
struct igbvf_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
@@ -2077,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->length = len;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
DMA_TO_DEVICE);
@@ -2100,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
@@ -2109,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
@@ -2120,7 +2127,6 @@ dma_error:
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
buffer_info->length = 0;
- buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
if (count)
count--;
@@ -2139,7 +2145,8 @@ dma_error:
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- int tx_flags, int count, u32 paylen,
+ int tx_flags, int count,
+ unsigned int first, u32 paylen,
u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc = NULL;
@@ -2189,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
* such as IA-64). */
wmb();
+ tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
/* we need this if more than one processor can write to our tail
@@ -2255,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
* count reflects descriptors mapped, if 0 then mapping error
* has occurred and we need to rewind the descriptor queue
*/
- count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+ count = igbvf_tx_map_adv(adapter, tx_ring, skb);
if (count) {
igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
- skb->len, hdr_len);
+ first, skb->len, hdr_len);
/* Make sure there is space in the ring for the next send. */
igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
} else {
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index c7564123dd3..ea480837343 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -708,11 +708,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
- if (!txdr->buffer_info) {
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate transmit descriptor ring memory\n");
+ if (!txdr->buffer_info)
return -ENOMEM;
- }
/* round up to nearest 4K */
@@ -797,11 +794,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
- if (!rxdr->buffer_info) {
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate receive descriptor ring\n");
+ if (!rxdr->buffer_info)
return -ENOMEM;
- }
/* Round up to nearest 4K */
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 687c83d1bda..be2989e6000 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8371ae4265f..a8e10cff7a8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -92,8 +92,6 @@
*/
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
-#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -158,7 +156,7 @@ struct vf_macvlans {
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
@@ -203,6 +201,7 @@ struct ixgbe_rx_queue_stats {
enum ixgbe_ring_state_t {
__IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_RSC_ENABLED,
@@ -280,15 +279,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 64
-#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#else
-#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
-#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
-#endif /* IXGBE_FCOE */
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -626,6 +620,7 @@ enum ixgbe_state_t {
__IXGBE_DOWN,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
+ __IXGBE_READ_I2C,
};
struct ixgbe_cb {
@@ -706,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#endif
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 7fd3833c7ec..d0113fc97b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -1003,15 +1003,16 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
}
/**
- * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
- * @byte_offset: EEPROM byte offset to read
+ * @dev_addr: address to read from
+ * @byte_offset: byte offset to read from dev_addr
* @eeprom_data: value read
*
- * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ * Performs 8 bit read operation to SFP module's data over I2C interface.
**/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ u8 byte_offset, u8 *eeprom_data)
{
s32 status = 0;
u16 sfp_addr = 0;
@@ -1025,7 +1026,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* 0xC30D. These registers are used to talk to the SFP+
* module's EEPROM through the SDA/SCL (I2C) interface.
*/
- sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+ sfp_addr = (dev_addr << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
hw->phy.ops.write_reg(hw,
IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
@@ -1057,7 +1058,6 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*eeprom_data = (u8)(sfp_data >> 8);
} else {
status = IXGBE_ERR_PHY;
- goto out;
}
out:
@@ -1065,6 +1065,36 @@ out:
}
/**
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
+ **/
+static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs 8 bit read operation to SFP module's SFF-8472 data over I2C
+ **/
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, sff8472_data);
+}
+
+/**
* ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
* @hw: pointer to hardware structure
*
@@ -1297,6 +1327,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.write_reg = &ixgbe_write_phy_reg_generic,
.setup_link = &ixgbe_setup_phy_link_generic,
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
.check_overtemp = &ixgbe_tn_check_overtemp,
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 33504617595..203a00c2433 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -2241,6 +2241,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 5e68afdd502..99e472ebaa7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f7a0970a251..bc3948ead6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 9bc17c0cb97..1f2c805684d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 1f4108ee154..1634de8b627 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 87592b458c9..ac780770863 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index ba835708fca..3164f5453b8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 4eac80d0185..05e23b80b5e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index 4dec47faeb0..a4ef07631d1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index c261333438b..f3d68f9696b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -449,7 +449,6 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u8 rval = 0;
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
@@ -460,14 +459,14 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
*num = adapter->dcb_cfg.num_tcs.pfc_tcs;
break;
default:
- rval = -EINVAL;
+ return -EINVAL;
break;
}
} else {
- rval = -EINVAL;
+ return -EINVAL;
}
- return rval;
+ return 0;
}
static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 3504686d3af..c5933f6dcee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 216203ece6e..f4d2e9e3c6d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,7 @@
#include <linux/uaccess.h>
#include "ixgbe.h"
+#include "ixgbe_phy.h"
#define IXGBE_ALL_RAR_ENTRIES 16
@@ -1040,6 +1041,9 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
p = (char *) adapter +
ixgbe_gstrings_stats[i].stat_offset;
break;
+ default:
+ data[i] = 0;
+ continue;
}
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
@@ -1096,8 +1100,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *ixgbe_gstrings_test,
- IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < IXGBE_TEST_LEN; i++) {
+ memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
@@ -2107,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
- u16 tx_itr_param, rx_itr_param;
+ u16 tx_itr_param, rx_itr_param, tx_itr_prev;
bool need_reset = false;
- /* don't accept tx specific changes if we've got mixed RxTx vectors */
- if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
- && ec->tx_coalesce_usecs)
- return -EINVAL;
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
+ /* reject Tx specific changes in case of mixed RxTx vectors */
+ if (ec->tx_coalesce_usecs)
+ return -EINVAL;
+ tx_itr_prev = adapter->rx_itr_setting;
+ } else {
+ tx_itr_prev = adapter->tx_itr_setting;
+ }
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
@@ -2139,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
else
tx_itr_param = adapter->tx_itr_setting;
+ /* mixed Rx/Tx */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+
+#if IS_ENABLED(CONFIG_BQL)
+ /* detect ITR changes that require update of TXDCTL.WTHRESH */
+ if ((adapter->tx_itr_setting > 1) &&
+ (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
+ if ((tx_itr_prev == 1) ||
+ (tx_itr_prev > IXGBE_100K_ITR))
+ need_reset = true;
+ } else {
+ if ((tx_itr_prev > 1) &&
+ (tx_itr_prev < IXGBE_100K_ITR))
+ need_reset = true;
+ }
+#endif
/* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter);
+ need_reset |= ixgbe_update_rsc(adapter);
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
@@ -2726,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev,
return 0;
}
+static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
+{
+ unsigned int max_combined;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ /* We only support one q_vector without MSI-X */
+ max_combined = 1;
+ } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ /* SR-IOV currently only allows one queue on the PF */
+ max_combined = 1;
+ } else if (tcs > 1) {
+ /* For DCB report channels per traffic class */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ /* 8 TC w/ 4 queues per TC */
+ max_combined = 4;
+ } else if (tcs > 4) {
+ /* 8 TC w/ 8 queues per TC */
+ max_combined = 8;
+ } else {
+ /* 4 TC w/ 16 queues per TC */
+ max_combined = 16;
+ }
+ } else if (adapter->atr_sample_rate) {
+ /* support up to 64 queues with ATR */
+ max_combined = IXGBE_MAX_FDIR_INDICES;
+ } else {
+ /* support up to 16 queues with RSS */
+ max_combined = IXGBE_MAX_RSS_INDICES;
+ }
+
+ return max_combined;
+}
+
+static void ixgbe_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ /* report maximum channels */
+ ch->max_combined = ixgbe_max_channels(adapter);
+
+ /* report info for other vector */
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ /* record RSS queues */
+ ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
+
+ /* nothing else to report if RSS is disabled */
+ if (ch->combined_count == 1)
+ return;
+
+ /* we do not support ATR queueing if SR-IOV is enabled */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ return;
+
+ /* same thing goes for being DCB enabled */
+ if (netdev_get_num_tc(dev) > 1)
+ return;
+
+ /* if ATR is disabled we can exit */
+ if (!adapter->atr_sample_rate)
+ return;
+
+ /* report flow director queues as maximum channels */
+ ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
+}
+
+static int ixgbe_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > ixgbe_max_channels(adapter))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ adapter->ring_feature[RING_F_FDIR].limit = count;
+
+ /* cap RSS limit at 16 */
+ if (count > IXGBE_MAX_RSS_INDICES)
+ count = IXGBE_MAX_RSS_INDICES;
+ adapter->ring_feature[RING_F_RSS].limit = count;
+
+#ifdef IXGBE_FCOE
+ /* cap FCoE limit at 8 */
+ if (count > IXGBE_FCRETA_SIZE)
+ count = IXGBE_FCRETA_SIZE;
+ adapter->ring_feature[RING_F_FCOE].limit = count;
+
+#endif
+ /* use setup TC to update any traffic class queue mapping */
+ return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+
+static int ixgbe_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status;
+ u8 sff8472_rev, addr_mode;
+ int ret_val = 0;
+ bool page_swap = false;
+
+ /* avoid concurrent i2c reads */
+ while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ msleep(100);
+
+ /* used by the service task */
+ set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ /* Check whether we support SFF-8472 or not */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_COMP,
+ &sff8472_rev);
+ if (status != 0) {
+ ret_val = -EIO;
+ goto err_out;
+ }
+
+ /* addressing mode is not supported */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_SWAP,
+ &addr_mode);
+ if (status != 0) {
+ ret_val = -EIO;
+ goto err_out;
+ }
+
+ if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+ e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
+ page_swap = true;
+ }
+
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ /* We have a SFP, but it does not support SFF-8472 */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have a SFP which supports a revision of SFF-8472. */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+
+err_out:
+ clear_bit(__IXGBE_READ_I2C, &adapter->state);
+ return ret_val;
+}
+
+static int ixgbe_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u8 databyte = 0xFF;
+ int i = 0;
+ int ret_val = 0;
+
+ /* ixgbe_get_module_info is called before this function in all
+ * cases, so we do not need to repeat the checks done there,
+ * and can trust ee->len to be a known value.
+ */
+
+ while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ msleep(100);
+ set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ /* Read the first block, SFF-8079 */
+ for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ if (status != 0) {
+ /* Error occurred while reading module */
+ ret_val = -EIO;
+ goto err_out;
+ }
+ data[i] = databyte;
+ }
+
+ /* If the second block is requested, check if SFF-8472 is supported. */
+ if (ee->len == ETH_MODULE_SFF_8472_LEN) {
+ if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
+ return -EOPNOTSUPP;
+
+ /* Read the second block, SFF-8472 */
+ for (i = ETH_MODULE_SFF_8079_LEN;
+ i < ETH_MODULE_SFF_8472_LEN; i++) {
+ status = hw->phy.ops.read_i2c_sff8472(hw,
+ i - ETH_MODULE_SFF_8079_LEN, &databyte);
+ if (status != 0) {
+ /* Error occurred while reading module */
+ ret_val = -EIO;
+ goto err_out;
+ }
+ data[i] = databyte;
+ }
+ }
+
+err_out:
+ clear_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ return ret_val;
+}
+
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
@@ -2754,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
.get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 496836765df..f58db453a97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -716,10 +716,8 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
/* Extra buffer to be shared by all DDPs for HW work around */
buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
- if (!buffer) {
- e_err(drv, "failed to allocate extra DDP buffer\n");
+ if (!buffer)
return -ENOMEM;
- }
dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index bf724da9937..3a02759b5e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8c74f739011..ef5f7a678ce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
fcoe = &adapter->ring_feature[RING_F_FCOE];
/* limit ourselves based on feature limits */
- fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (fcoe_i) {
@@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (vmdq_i > 1 && fcoe_i) {
- /* reserve no more than number of CPUs */
- fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
-
/* alloc queues for FCoE separately */
fcoe->indices = fcoe_i;
fcoe->offset = vmdq_i * rss_i;
@@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
if (rss_i > 1 && adapter->atr_sample_rate) {
f = &adapter->ring_feature[RING_F_FDIR];
- f->indices = min_t(u16, num_online_cpus(), f->limit);
- rss_i = max_t(u16, rss_i, f->indices);
+ rss_i = f->indices = f->limit;
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
{
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
- int node = -1;
+ int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count, size;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- if (cpu_online(v_idx)) {
- cpu = v_idx;
- node = cpu_to_node(cpu);
+ if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ if (rss_i > 1 && adapter->atr_sample_rate) {
+ if (cpu_online(v_idx)) {
+ cpu = v_idx;
+ node = cpu_to_node(cpu);
+ }
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ac41361f932..68478d6dfa2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -66,7 +66,7 @@ static char ixgbe_default_device_descr[] =
#define DRV_VERSION "3.11.33-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2012 Intel Corporation.";
+ "Copyright (c) 1999-2013 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -838,7 +838,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- rmb();
+ read_barrier_depends();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -1399,6 +1399,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
/* set gso_size to avoid messing up TCP MSS */
skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
IXGBE_CB(skb)->append_cnt);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}
static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
@@ -2785,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
/*
* set WTHRESH to encourage burst writeback, it should not be set
- * higher than 1 when ITR is 0 as it could cause false TX hangs
+ * higher than 1 when:
+ * - ITR is 0 as it could cause false TX hangs
+ * - ITR is set to > 100k int/sec and BQL is enabled
*
* In order to avoid issues WTHRESH + PTHRESH should always be equal
* to or less than the number of on chip descriptors, which is
* currently 40.
*/
+#if IS_ENABLED(CONFIG_BQL)
+ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
+#else
if (!ring->q_vector || (ring->q_vector->itr < 8))
+#endif
txdctl |= (1 << 16); /* WTHRESH = 1 */
else
txdctl |= (8 << 16); /* WTHRESH = 8 */
@@ -2812,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->atr_sample_rate = 0;
}
+ /* initialize XPS */
+ if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+ struct ixgbe_q_vector *q_vector = ring->q_vector;
+
+ if (q_vector)
+ netif_set_xps_queue(adapter->netdev,
+ &q_vector->affinity_mask,
+ ring->queue_index);
+ }
+
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */
@@ -4464,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- unsigned int rss;
+ unsigned int rss, fdir;
u32 fwsm;
#ifdef CONFIG_IXGBE_DCB
int j;
@@ -4479,38 +4496,57 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
- /* Set capability flags */
+ /* Set common capability flags and settings */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82599;
+ adapter->atr_sample_rate = 20;
+ fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_FDIR].limit = fdir;
+ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+#ifdef CONFIG_IXGBE_DCA
+ adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+ adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+ /* Default traffic class to use for FCoE */
+ adapter->fcoe.up = IXGBE_FCOE_DEFTC;
+#endif /* CONFIG_IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+
+ /* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+
adapter->max_q_vectors = MAX_Q_VECTORS_82598;
+ adapter->ring_feature[RING_F_FDIR].limit = 0;
+ adapter->atr_sample_rate = 0;
+ adapter->fdir_pballoc = 0;
+#ifdef IXGBE_FCOE
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+ adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+ break;
+ case ixgbe_mac_82599EB:
+ if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X540:
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- case ixgbe_mac_82599EB:
- adapter->max_q_vectors = MAX_Q_VECTORS_82599;
- adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
- adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
- adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- /* Flow Director hash filters enabled */
- adapter->atr_sample_rate = 20;
- adapter->ring_feature[RING_F_FDIR].limit =
- IXGBE_MAX_FDIR_INDICES;
- adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
-#ifdef IXGBE_FCOE
- adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-#ifdef CONFIG_IXGBE_DCB
- /* Default traffic class to use for FCoE */
- adapter->fcoe.up = IXGBE_FCOE_DEFTC;
-#endif
-#endif /* IXGBE_FCOE */
break;
default:
break;
@@ -4869,7 +4905,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
- (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -5679,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
+ /* concurrent i2c reads are not supported */
+ if (test_bit(__IXGBE_READ_I2C, &adapter->state))
+ return;
+
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
@@ -6344,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- smp_processor_id();
-#ifdef IXGBE_FCOE
- __be16 protocol = vlan_get_protocol(skb);
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_ring_feature *f;
+ int txq;
- if (((protocol == htons(ETH_P_FCOE)) ||
- (protocol == htons(ETH_P_FIP))) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
- struct ixgbe_ring_feature *f;
+ /*
+ * only execute the code below if protocol is FCoE
+ * or FIP and we have FCoE enabled on the adapter
+ */
+ switch (vlan_get_protocol(skb)) {
+ case __constant_htons(ETH_P_FCOE):
+ case __constant_htons(ETH_P_FIP):
+ adapter = netdev_priv(dev);
- f = &adapter->ring_feature[RING_F_FCOE];
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ break;
+ default:
+ return __netdev_pick_tx(dev, skb);
+ }
- while (txq >= f->indices)
- txq -= f->indices;
- txq += adapter->ring_feature[RING_F_FCOE].offset;
+ f = &adapter->ring_feature[RING_F_FCOE];
- return txq;
- }
-#endif
+ txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+ smp_processor_id();
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- while (unlikely(txq >= dev->real_num_tx_queues))
- txq -= dev->real_num_tx_queues;
- return txq;
- }
+ while (txq >= f->indices)
+ txq -= f->indices;
- return skb_tx_hash(dev, skb);
+ return txq + f->offset;
}
+#endif
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
@@ -6780,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
}
+#endif /* CONFIG_IXGBE_DCB */
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
@@ -6805,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
ixgbe_close(dev);
ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_IXGBE_DCB
if (tc) {
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
@@ -6827,31 +6871,24 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
adapter->dcb_cfg.pfc_mode_enable = false;
}
- ixgbe_init_interrupt_scheme(adapter);
ixgbe_validate_rtr(adapter, tc);
+
+#endif /* CONFIG_IXGBE_DCB */
+ ixgbe_init_interrupt_scheme(adapter);
+
if (netif_running(dev))
- ixgbe_open(dev);
+ return ixgbe_open(dev);
return 0;
}
-#endif /* CONFIG_IXGBE_DCB */
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
rtnl_lock();
-#ifdef CONFIG_IXGBE_DCB
ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
-#else
- if (netif_running(netdev))
- ixgbe_close(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- ixgbe_open(netdev);
-#endif
rtnl_unlock();
}
@@ -7001,7 +7038,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
+static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr)
{
@@ -7078,7 +7115,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
}
static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev)
+ struct net_device *dev,
+ u32 filter_mask)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
u16 mode;
@@ -7098,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
+#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
+#endif
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -7210,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
+ unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
- unsigned int indices = num_possible_cpus();
- unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -7261,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_save_state(pdev);
+ if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
- if (ii->mac == ixgbe_mac_82598EB)
- dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
- IXGBE_MAX_RSS_INDICES);
- else
- dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
- IXGBE_MAX_FDIR_INDICES);
+ /* 8 TC w/ 4 queues per TC */
+ indices = 4 * MAX_TRAFFIC_CLASS;
+#else
+ indices = IXGBE_MAX_RSS_INDICES;
#endif
+ }
- if (ii->mac == ixgbe_mac_82598EB)
- indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
- else
- indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-
-#ifdef IXGBE_FCOE
- indices += min_t(unsigned int, num_possible_cpus(),
- IXGBE_MAX_FCOE_INDICES);
-#endif
- indices = max_t(unsigned int, dcb_max, indices);
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
@@ -7434,13 +7463,17 @@ skip_sriov:
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+ unsigned int fcoe_l;
+
if (hw->mac.ops.get_device_caps) {
hw->mac.ops.get_device_caps(hw, &device_caps);
if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
}
- adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+ fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
+ adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
netdev->features |= NETIF_F_FSO |
NETIF_F_FCOE_CRC;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 1f3e32b576a..d4a64e66539 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 42dd65e6ac9..e44ff47659b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index f4b2c0d662d..060d2ad2ac9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -852,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_IDENTIFIER,
- &identifier);
+ &identifier);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */
@@ -870,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_1GBE_COMP_CODES,
&comp_codes_1g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
/* ID Module
@@ -984,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE0,
- &oui_bytes[0]);
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
vendor_oui =
@@ -1204,6 +1190,22 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
}
/**
+ * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ sff8472_data);
+}
+
+/**
* ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to write
@@ -1291,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
+ ixgbe_i2c_bus_clear(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msleep(100);
- ixgbe_i2c_bus_clear(hw);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read error - Retrying.\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 51b0a91f62b..886a3431cf5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,7 @@
#include "ixgbe_type.h"
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
/* EEPROM byte offsets */
#define IXGBE_SFF_IDENTIFIER 0x0
@@ -41,6 +42,8 @@
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -51,6 +54,7 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -88,6 +92,9 @@
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+/* SFP+ SFF-8472 Compliance code */
+#define IXGBE_SFF_SFF_8472_UNSUP 0x00
+
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
@@ -125,6 +132,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
+s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
#endif /* _IXGBE_PHY_H_ */
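A hedged sketch of how the new defines could gate access to the 0xA2 page: the compliance byte (offset 0x5E on the 0xA0 page) and the "address change required" bit (0x04 in byte 0x5C) together say whether the diagnostic page is readable at a fixed address. The helper name is made up; only the defines and the two phy ops declared above are assumed:

static bool ixgbe_sfp_has_diag_page(struct ixgbe_hw *hw)
{
	u8 comp = 0, swap = 0;

	/* 0x00 in the compliance byte means no SFF-8472 support at all */
	if (hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP, &comp))
		return false;
	if (comp == IXGBE_SFF_SFF_8472_UNSUP)
		return false;

	/* Modules that require an address-change sequence are not handled
	 * by the plain fixed-address read in ixgbe_read_i2c_sff8472_generic().
	 */
	if (hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_SWAP, &swap))
		return false;
	return !(swap & IXGBE_SFF_ADDRESSING_MODE);
}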
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 53d20475971..331987d6815 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index ee3507f0ea5..d44b4d21268 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 008f9cea68d..4713f9fc7f4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 16ddf14e8ba..d118def16f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0bdcc88cbf5..6652e96c352 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -2874,6 +2874,7 @@ struct ixgbe_phy_operations {
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
s32 (*check_overtemp)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 2fa58437a42..66c5e946284 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -878,6 +878,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f523f0204f1..00f25b5f297 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -712,16 +712,13 @@ static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
__be32 ipv4_dst)
{
#ifdef CONFIG_INET
- __be64 be_mac = 0;
unsigned char mac[ETH_ALEN];
if (!ipv4_is_multicast(ipv4_dst)) {
- if (cmd->fs.flow_type & FLOW_MAC_EXT) {
+ if (cmd->fs.flow_type & FLOW_MAC_EXT)
memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
- } else {
- be_mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
- memcpy(&mac, &be_mac, ETH_ALEN);
- }
+ else
+ memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
} else {
ip_eth_mc_map(ipv4_dst, mac);
}
@@ -744,7 +741,6 @@ static int add_ip_rule(struct mlx4_en_priv *priv,
spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
if (!spec_l2 || !spec_l3) {
- en_err(priv, "Fail to alloc ethtool rule.\n");
err = -ENOMEM;
goto free_spec;
}
@@ -785,7 +781,6 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
if (!spec_l2 || !spec_l3 || !spec_l4) {
- en_err(priv, "Fail to alloc ethtool rule.\n");
err = -ENOMEM;
goto free_spec;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index f3c7961a271..e3c3d122979 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -95,6 +95,28 @@ int en_print(const char *level, const struct mlx4_en_priv *priv,
return i;
}
+void mlx4_en_update_loopback_state(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
+ MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
+
+ /* RX filtering (dropping HW-loopbacked packets) is needed only when
+ * SRIOV/multi-function is enabled and neither the loopback feature nor
+ * the loopback selftest is active
+ */
+ if (mlx4_is_mfunc(priv->mdev->dev) &&
+ !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
+ priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;
+
+ /* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
+ * is requested
+ */
+ if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
+ priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
+}
+
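The helper above boils down to the following truth table (mfunc = SRIOV/multi-function device, LB = NETIF_F_LOOPBACK requested, selftest = priv->validate_loopback):

	mfunc  LB   selftest | RX_FILTER_NEEDED  ENABLE_HW_LOOPBACK
	  0    any     0     |        0                  0
	  0    any     1     |        0                  1
	  1     0      0     |        1                  1
	  1     1      0     |        0                  1
	  1    any     1     |        0                  1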
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
struct mlx4_en_profile *params = &mdev->profile;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index ac1c14f7424..5088dc5c3d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -132,17 +132,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
.priority = MLX4_DOMAIN_RFS,
};
int rc;
- __be64 mac;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list);
list_add_tail(&spec_tcp.list, &rule.list);
- mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
-
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
- memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
filter->activated = 0;
@@ -413,6 +410,235 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
return 0;
}
+static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
+{
+ int i;
+ for (i = ETH_ALEN - 1; i >= 0; --i) {
+ dst_mac[i] = src_mac & 0xff;
+ src_mac >>= 8;
+ }
+ memset(&dst_mac[ETH_ALEN], 0, 2);
+}
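Worked example for the conversion above, with an arbitrary burned-in value: src_mac = 0x0002c9010203ULL fills dst_mac[0..5] with 00:02:c9:01:02:03 (most significant byte first) and zeroes dst_mac[6..7], so the 64-bit MAC comparisons used elsewhere in this patch (ether_addr_equal_64bits) never touch uninitialized padding.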
+
+static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
+ unsigned char *mac, int *qpn, u64 *reg_id)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int err;
+
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = *qpn;
+ memcpy(&gid[10], mac, ETH_ALEN);
+ gid[5] = priv->port;
+
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec_eth = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.port = priv->port;
+ rule.qpn = *qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ list_add_tail(&spec_eth.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (err)
+ en_warn(priv, "Failed Attaching Unicast\n");
+
+ return err;
+}
+
+static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
+ unsigned char *mac, int qpn, u64 reg_id)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = qpn;
+ memcpy(&gid[10], mac, ETH_ALEN);
+ gid[5] = priv->port;
+
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ mlx4_flow_detach(dev, reg_id);
+ break;
+ }
+ default:
+ en_err(priv, "Invalid steering mode.\n");
+ }
+}
+
+static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ struct mlx4_mac_entry *entry;
+ int index = 0;
+ int err = 0;
+ u64 reg_id;
+ int *qpn = &priv->base_qpn;
+ u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+
+ en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
+ priv->dev->dev_addr);
+ index = mlx4_register_mac(dev, priv->port, mac);
+ if (index < 0) {
+ err = index;
+ en_err(priv, "Failed adding MAC: %pM\n",
+ priv->dev->dev_addr);
+ return err;
+ }
+
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
+ int base_qpn = mlx4_get_base_qpn(dev, priv->port);
+ *qpn = base_qpn + index;
+ return 0;
+ }
+
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
+ if (err) {
+ en_err(priv, "Failed to reserve qp for mac registration\n");
+ goto qp_err;
+ }
+
+ err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+ if (err)
+ goto steer_err;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+ entry->reg_id = reg_id;
+
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+ return 0;
+
+alloc_err:
+ mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+
+steer_err:
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+ mlx4_unregister_mac(dev, priv->port, mac);
+ return err;
+}
+
+static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int qpn = priv->base_qpn;
+ u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+
+ en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
+ priv->dev->dev_addr);
+ mlx4_unregister_mac(dev, priv->port, mac);
+
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+
+ mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac,
+ priv->dev->dev_addr)) {
+ en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
+ priv->port, priv->dev->dev_addr, qpn);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+ mlx4_qp_release_range(dev, qpn, 1);
+
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ break;
+ }
+ }
+ }
+}
+
+static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
+ unsigned char *new_mac, unsigned char *prev_mac)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int err = 0;
+ u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
+
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
+
+ bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+ mlx4_unregister_mac(dev, priv->port,
+ prev_mac_u64);
+ hlist_del_rcu(&entry->hlist);
+ synchronize_rcu();
+ memcpy(entry->mac, new_mac, ETH_ALEN);
+ entry->reg_id = 0;
+ mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[mac_hash]);
+ mlx4_register_mac(dev, priv->port, new_mac_u64);
+ err = mlx4_en_uc_steer_add(priv, new_mac,
+ &qpn,
+ &entry->reg_id);
+ return err;
+ }
+ }
+ return -EINVAL;
+ }
+
+ return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
+}
+
u64 mlx4_en_mac_to_u64(u8 *addr)
{
u64 mac = 0;
@@ -435,7 +661,6 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
- priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
queue_work(mdev->workqueue, &priv->mac_task);
return 0;
}
@@ -450,13 +675,14 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
/* Remove old MAC and insert the new one */
- err = mlx4_replace_mac(mdev->dev, priv->port,
- priv->base_qpn, priv->mac);
+ err = mlx4_en_replace_mac(priv, priv->base_qpn,
+ priv->dev->dev_addr, priv->prev_mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
+ memcpy(priv->prev_mac, priv->dev->dev_addr,
+ sizeof(priv->prev_mac));
} else
- en_dbg(HW, priv, "Port is down while "
- "registering mac, exiting...\n");
+ en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
mutex_unlock(&mdev->state_lock);
}
@@ -482,7 +708,6 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
if (!tmp) {
- en_err(priv, "failed to allocate multicast list\n");
mlx4_en_clear_list(dev);
return;
}
@@ -526,181 +751,153 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
}
}
if (!found) {
- new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
+ new_mc = kmemdup(src_tmp,
+ sizeof(struct mlx4_en_mc_list),
GFP_KERNEL);
- if (!new_mc) {
- en_err(priv, "Failed to allocate current multicast list\n");
+ if (!new_mc)
return;
- }
- memcpy(new_mc, src_tmp,
- sizeof(struct mlx4_en_mc_list));
+
new_mc->action = MCLIST_ADD;
list_add_tail(&new_mc->list, dst);
}
}
}
-static void mlx4_en_set_multicast(struct net_device *dev)
+static void mlx4_en_set_rx_mode(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!priv->port_up)
return;
- queue_work(priv->mdev->workqueue, &priv->mcast_task);
+ queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
-static void mlx4_en_do_set_multicast(struct work_struct *work)
+static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
+ struct mlx4_en_dev *mdev)
{
- struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
- mcast_task);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct net_device *dev = priv->dev;
- struct mlx4_en_mc_list *mclist, *tmp;
- u64 mcast_addr = 0;
- u8 mc_list[16] = {0};
int err = 0;
- mutex_lock(&mdev->state_lock);
- if (!mdev->device_up) {
- en_dbg(HW, priv, "Card is not up, "
- "ignoring multicast change.\n");
- goto out;
- }
- if (!priv->port_up) {
- en_dbg(HW, priv, "Port is down, "
- "ignoring multicast change.\n");
- goto out;
- }
-
- if (!netif_carrier_ok(dev)) {
- if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
- if (priv->port_state.link_state) {
- priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
- netif_carrier_on(dev);
- en_dbg(LINK, priv, "Link Up\n");
- }
- }
- }
-
- /*
- * Promsicuous mode: disable all filters
- */
-
- if (dev->flags & IFF_PROMISC) {
- if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
- if (netif_msg_rx_status(priv))
- en_warn(priv, "Entering promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_PROMISC;
-
- /* Enable promiscouos mode */
- switch (mdev->dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_DEVICE_MANAGED:
- err = mlx4_flow_steer_promisc_add(mdev->dev,
- priv->port,
- priv->base_qpn,
- MLX4_FS_PROMISC_UPLINK);
- if (err)
- en_err(priv, "Failed enabling promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- break;
-
- case MLX4_STEERING_MODE_B0:
- err = mlx4_unicast_promisc_add(mdev->dev,
- priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed enabling unicast promiscuous mode\n");
-
- /* Add the default qp number as multicast
- * promisc
- */
- if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev,
- priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed enabling multicast promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- }
- break;
-
- case MLX4_STEERING_MODE_A0:
- err = mlx4_SET_PORT_qpn_calc(mdev->dev,
- priv->port,
- priv->base_qpn,
- 1);
- if (err)
- en_err(priv, "Failed enabling promiscuous mode\n");
- break;
- }
-
- /* Disable port multicast filter (unconditionally) */
- err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
- 0, MLX4_MCAST_DISABLE);
- if (err)
- en_err(priv, "Failed disabling "
- "multicast filter\n");
-
- /* Disable port VLAN filter */
- err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
- if (err)
- en_err(priv, "Failed disabling VLAN filter\n");
- }
- goto out;
- }
-
- /*
- * Not in promiscuous mode
- */
-
- if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+ if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
if (netif_msg_rx_status(priv))
- en_warn(priv, "Leaving promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+ en_warn(priv, "Entering promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_PROMISC;
- /* Disable promiscouos mode */
+ /* Enable promiscuous mode */
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
- err = mlx4_flow_steer_promisc_remove(mdev->dev,
- priv->port,
- MLX4_FS_PROMISC_UPLINK);
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_UPLINK);
if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
break;
case MLX4_STEERING_MODE_B0:
- err = mlx4_unicast_promisc_remove(mdev->dev,
- priv->base_qpn,
- priv->port);
+ err = mlx4_unicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
if (err)
- en_err(priv, "Failed disabling unicast promiscuous mode\n");
- /* Disable Multicast promisc */
- if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev,
- priv->base_qpn,
- priv->port);
+ en_err(priv, "Failed enabling unicast promiscuous mode\n");
+
+ /* Add the default qp number as multicast
+ * promisc
+ */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
if (err)
- en_err(priv, "Failed disabling multicast promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed enabling multicast promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
break;
case MLX4_STEERING_MODE_A0:
err = mlx4_SET_PORT_qpn_calc(mdev->dev,
priv->port,
- priv->base_qpn, 0);
+ priv->base_qpn,
+ 1);
if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
+ en_err(priv, "Failed enabling promiscuous mode\n");
break;
}
- /* Enable port VLAN filter */
+ /* Disable port multicast filter (unconditionally) */
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ en_err(priv, "Failed disabling multicast filter\n");
+
+ /* Disable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
- en_err(priv, "Failed enabling VLAN filter\n");
+ en_err(priv, "Failed disabling VLAN filter\n");
}
+}
+
+static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
+ struct mlx4_en_dev *mdev)
+{
+ int err = 0;
+
+ if (netif_msg_rx_status(priv))
+ en_warn(priv, "Leaving promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+ /* Disable promiscuous mode */
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling unicast promiscuous mode\n");
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn, 0);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ break;
+ }
+
+ /* Enable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+ if (err)
+ en_err(priv, "Failed enabling VLAN filter\n");
+}
+
+static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
+ struct net_device *dev,
+ struct mlx4_en_dev *mdev)
+{
+ struct mlx4_en_mc_list *mclist, *tmp;
+ u64 mcast_addr = 0;
+ u8 mc_list[16] = {0};
+ int err = 0;
/* Enable/disable the multicast filter according to IFF_ALLMULTI */
if (dev->flags & IFF_ALLMULTI) {
@@ -814,6 +1011,170 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
}
}
}
+}
+
+static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
+ struct net_device *dev,
+ struct mlx4_en_dev *mdev)
+{
+ struct netdev_hw_addr *ha;
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ bool found;
+ u64 mac;
+ int err = 0;
+ struct hlist_head *bucket;
+ unsigned int i;
+ int removed = 0;
+ u32 prev_flags;
+
+ /* Note that we do not need to protect our mac_hash traversal with rcu,
+ * since all modification code is protected by mdev->state_lock
+ */
+
+ /* find what to remove */
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+ bucket = &priv->mac_hash[i];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ found = false;
+ netdev_for_each_uc_addr(ha, dev) {
+ if (ether_addr_equal_64bits(entry->mac,
+ ha->addr)) {
+ found = true;
+ break;
+ }
+ }
+
+ /* Never remove the port's own MAC, even though it is not in the uc list */
+ if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
+ found = true;
+
+ if (!found) {
+ mac = mlx4_en_mac_to_u64(entry->mac);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ priv->base_qpn,
+ entry->reg_id);
+ mlx4_unregister_mac(mdev->dev, priv->port, mac);
+
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
+ entry->mac, priv->port);
+ ++removed;
+ }
+ }
+ }
+
+ /* if we didn't remove anything, there is no use in trying to add
+ * again once we are in a forced promisc mode state
+ */
+ if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
+ return;
+
+ prev_flags = priv->flags;
+ priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
+
+ /* find what to add */
+ netdev_for_each_uc_addr(ha, dev) {
+ found = false;
+ bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
+ hlist_for_each_entry(entry, n, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
+ ha->addr, priv->port);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ }
+ mac = mlx4_en_mac_to_u64(ha->addr);
+ memcpy(entry->mac, ha->addr, ETH_ALEN);
+ err = mlx4_register_mac(mdev->dev, priv->port, mac);
+ if (err < 0) {
+ en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
+ ha->addr, priv->port, err);
+ kfree(entry);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ }
+ err = mlx4_en_uc_steer_add(priv, ha->addr,
+ &priv->base_qpn,
+ &entry->reg_id);
+ if (err) {
+ en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
+ ha->addr, priv->port, err);
+ mlx4_unregister_mac(mdev->dev, priv->port, mac);
+ kfree(entry);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ } else {
+ unsigned int mac_hash;
+ en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
+ ha->addr, priv->port);
+ mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ hlist_add_head_rcu(&entry->hlist, bucket);
+ }
+ }
+ }
+
+ if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
+ en_warn(priv, "Forcing promiscuous mode on port:%d\n",
+ priv->port);
+ } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
+ en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
+ priv->port);
+ }
+}
+
+static void mlx4_en_do_set_rx_mode(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ rx_mode_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+ en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
+ goto out;
+ }
+ if (!priv->port_up) {
+ en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
+ goto out;
+ }
+
+ if (!netif_carrier_ok(dev)) {
+ if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+ if (priv->port_state.link_state) {
+ priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+ netif_carrier_on(dev);
+ en_dbg(LINK, priv, "Link Up\n");
+ }
+ }
+ }
+
+ if (dev->priv_flags & IFF_UNICAST_FLT)
+ mlx4_en_do_uc_filter(priv, dev, mdev);
+
+ /* Promiscuous mode: disable all filters */
+ if ((dev->flags & IFF_PROMISC) ||
+ (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
+ mlx4_en_set_promisc_mode(priv, mdev);
+ goto out;
+ }
+
+ /* Not in promiscuous mode */
+ if (priv->flags & MLX4_EN_FLAG_PROMISC)
+ mlx4_en_clear_promisc_mode(priv, mdev);
+
+ mlx4_en_do_multicast(priv, dev, mdev);
out:
mutex_unlock(&mdev->state_lock);
}
@@ -876,9 +1237,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
- en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
- "rx_frames:%d rx_usecs:%d\n",
- priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+ en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
+ priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
/* Setup cq moderation params */
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -959,8 +1319,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
cq->moder_time = moder_time;
err = mlx4_en_set_cq_moder(priv, cq);
if (err)
- en_err(priv, "Failed modifying moderation "
- "for cq:%d\n", ring);
+ en_err(priv, "Failed modifying moderation for cq:%d\n",
+ ring);
}
priv->last_moder_packets[ring] = rx_packets;
priv->last_moder_bytes[ring] = rx_bytes;
@@ -1077,8 +1437,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Set qp number */
en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
- err = mlx4_get_eth_qp(mdev->dev, priv->port,
- priv->mac, &priv->base_qpn);
+ err = mlx4_en_get_qp(priv);
if (err) {
en_err(priv, "Failed getting eth qp\n");
goto cq_err;
@@ -1141,8 +1500,8 @@ int mlx4_en_start_port(struct net_device *dev)
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err) {
- en_err(priv, "Failed setting port general configurations "
- "for port %d, with error %d\n", priv->port, err);
+ en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+ priv->port, err);
goto tx_err;
}
/* Set default qp number */
@@ -1172,7 +1531,7 @@ int mlx4_en_start_port(struct net_device *dev)
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
/* Schedule multicast task to populate multicast list */
- queue_work(mdev->workqueue, &priv->mcast_task);
+ queue_work(mdev->workqueue, &priv->rx_mode_task);
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
@@ -1191,7 +1550,7 @@ tx_err:
rss_err:
mlx4_en_release_rss_steer(priv);
mac_err:
- mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+ mlx4_en_put_qp(priv);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -1290,7 +1649,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
mlx4_en_release_rss_steer(priv);
/* Unregister Mac address for the port */
- mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+ mlx4_en_put_qp(priv);
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
mdev->mac_removed[priv->port] = 1;
@@ -1323,15 +1682,12 @@ static void mlx4_en_restart(struct work_struct *work)
watchdog_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
- int i;
en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
mlx4_en_stop_port(dev, 1);
- for (i = 0; i < priv->tx_ring_num; i++)
- netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
if (mlx4_en_start_port(dev))
en_err(priv, "Failed restarting port %d\n", priv->port);
}
@@ -1563,17 +1919,92 @@ static int mlx4_en_set_features(struct net_device *netdev,
priv->ctrl_flags &=
cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ mlx4_en_update_loopback_state(netdev, features);
+
return 0;
}
+static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 flags)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int err;
+
+ if (!mlx4_is_mfunc(mdev))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses, allow only
+ * permanent addresses if ndm_state is given
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ en_info(priv, "Add FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+static int mlx4_en_fdb_del(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int err;
+
+ if (!mlx4_is_mfunc(mdev))
+ return -EOPNOTSUPP;
+
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ en_info(priv, "Del FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+
+ return err;
+}
+
+static int mlx4_en_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev, int idx)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+
+ if (mlx4_is_mfunc(mdev))
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats = mlx4_en_get_stats,
- .ndo_set_rx_mode = mlx4_en_set_multicast,
+ .ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
@@ -1588,6 +2019,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
+ .ndo_fdb_add = mlx4_en_fdb_add,
+ .ndo_fdb_del = mlx4_en_fdb_del,
+ .ndo_fdb_dump = mlx4_en_fdb_dump,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1644,7 +2078,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
- INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
+ INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
@@ -1654,16 +2088,24 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
+ INIT_HLIST_HEAD(&priv->mac_hash[i]);
+
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
- priv->mac = mdev->dev->caps.def_mac[priv->port];
- if (ILLEGAL_MAC(priv->mac)) {
- en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
- priv->port, priv->mac);
+
+ /* Set default MAC */
+ dev->addr_len = ETH_ALEN;
+ mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+ priv->port, dev->dev_addr);
err = -EINVAL;
goto out;
}
+ memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
+
priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
err = mlx4_en_alloc_resources(priv);
@@ -1694,11 +2136,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
- /* Set defualt MAC */
- dev->addr_len = ETH_ALEN;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
-
/*
* Set driver features
*/
@@ -1718,6 +2155,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MLX4_STEERING_MODE_DEVICE_MANAGED)
dev->hw_features |= NETIF_F_NTUPLE;
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -1731,6 +2171,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+
/* Configure port */
mlx4_en_calc_rx_buf(dev);
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index fed26d867f4..ce38654bbdd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -563,9 +563,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length;
int polled = 0;
int ip_summed;
- struct ethhdr *ethh;
- dma_addr_t dma;
- u64 s_mac;
int factor = priv->cqe_factor;
if (!priv->port_up)
@@ -603,21 +600,41 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
- /* Get pointer to first fragment since we haven't skb yet and
- * cast it to ethhdr struct */
- dma = be64_to_cpu(rx_desc->data[0].addr);
- dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
- DMA_FROM_DEVICE);
- ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].offset);
- s_mac = mlx4_en_mac_to_u64(ethh->h_source);
-
- /* If source MAC is equal to our own MAC and not performing
- * the selftest or flb disabled - drop the packet */
- if (s_mac == priv->mac &&
- !((dev->features & NETIF_F_LOOPBACK) ||
- priv->validate_loopback))
- goto next;
+ /* Drop HW-loopbacked packets when RX filtering is needed, i.e. when
+ * SRIOV/multi-function is enabled and neither the loopback feature nor
+ * the loopback selftest is active (see mlx4_en_update_loopback_state)
+ */
+ if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
+ struct ethhdr *ethh;
+ dma_addr_t dma;
+ /* Get pointer to first fragment since we haven't
+ * skb yet and cast it to ethhdr struct
+ */
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
+ DMA_FROM_DEVICE);
+ ethh = (struct ethhdr *)(page_address(frags[0].page) +
+ frags[0].offset);
+
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n;
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+
+ /* Drop the packet if its source MAC is one of ours, i.e. the HW looped it back */
+ mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac,
+ ethh->h_source)) {
+ rcu_read_unlock();
+ goto next;
+ }
+ }
+ rcu_read_unlock();
+ }
+ }
/*
* Packet is OK - process it.
@@ -835,11 +852,9 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_qp_context *context;
int err = 0;
- context = kmalloc(sizeof *context , GFP_KERNEL);
- if (!context) {
- en_err(priv, "Failed to allocate qp context\n");
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
return -ENOMEM;
- }
err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index bf2e5d3f177..3488c6d9e6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -87,6 +87,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
priv->loopback_ok = 0;
priv->validate_loopback = 1;
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
en_err(priv, "Transmitting loopback packet failed\n");
@@ -107,6 +109,7 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 30724d81111..49308cc65ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -294,6 +294,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
cnt++;
}
+ netdev_tx_reset_queue(ring->tx_queue);
+
if (cnt)
en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
@@ -640,7 +642,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tx_csum++;
}
- if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
+ if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
/* Copy dst mac address to wqe. This allows loopback in eSwitch,
* so that VFs and PF can communicate with each other
*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12ddae6efce..b9dde139dac 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- if ((dev_cap->flags &
+ if ((dev->caps.flags &
(MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
mlx4_is_master(dev))
dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
@@ -1833,12 +1833,9 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev;
info->port = port;
if (!mlx4_is_slave(dev)) {
- INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table);
- info->base_qpn =
- dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
- (port - 1) * (1 << log_num_mac);
+ info->base_qpn = mlx4_get_base_qpn(dev, port);
}
sprintf(info->dev_name, "mlx4_port%d", port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 172daaa29a9..ed4a6959e82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -653,11 +653,6 @@ struct mlx4_set_port_rqp_calc_context {
__be32 mcast;
};
-struct mlx4_mac_entry {
- u64 mac;
- u64 reg_id;
-};
-
struct mlx4_port_info {
struct mlx4_dev *dev;
int port;
@@ -667,7 +662,6 @@ struct mlx4_port_info {
char dev_mtu_name[16];
struct device_attribute port_mtu_attr;
struct mlx4_mac_table mac_table;
- struct radix_tree_root mac_tree;
struct mlx4_vlan_table vlan_table;
int base_qpn;
};
@@ -916,7 +910,6 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
-int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 43f01650e58..c313d7e943a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -198,7 +198,6 @@ enum cq_type {
*/
#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
#define XNOR(x, y) (!(x) == !(y))
-#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
struct mlx4_en_tx_info {
@@ -432,6 +431,21 @@ struct ethtool_flow_id {
u64 id;
};
+enum {
+ MLX4_EN_FLAG_PROMISC = (1 << 0),
+ MLX4_EN_FLAG_MC_PROMISC = (1 << 1),
+ /* whether we need to enable hardware loopback by putting dmac
+ * in Tx WQE
+ */
+ MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
+ /* whether we need to drop packets that hardware loopback-ed */
+ MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
+ MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4)
+};
+
+#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
+#define MLX4_EN_MAC_HASH_IDX 5
+
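With the two defines above, a MAC lands in one of 256 buckets keyed by its last octet; a minimal sketch of the lookup pattern used throughout the netdev hunks earlier in this patch (the inline helper itself is illustrative):

static inline struct hlist_head *
mlx4_en_mac_bucket(struct mlx4_en_priv *priv, const unsigned char *mac)
{
	/* MLX4_EN_MAC_HASH_IDX == 5, i.e. the last octet of the address,
	 * so 00:11:22:33:44:dd always hashes to bucket 0xdd.
	 */
	return &priv->mac_hash[mac[MLX4_EN_MAC_HASH_IDX]];
}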
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -472,7 +486,7 @@ struct mlx4_en_priv {
int registered;
int allocated;
int stride;
- u64 mac;
+ unsigned char prev_mac[ETH_ALEN + 2];
int mac_index;
unsigned max_mtu;
int base_qpn;
@@ -481,8 +495,6 @@ struct mlx4_en_priv {
struct mlx4_en_rss_map rss_map;
__be32 ctrl_flags;
u32 flags;
-#define MLX4_EN_FLAG_PROMISC 0x1
-#define MLX4_EN_FLAG_MC_PROMISC 0x2
u8 num_tx_rings_p_up;
u32 tx_ring_num;
u32 rx_ring_num;
@@ -496,7 +508,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *tx_cq;
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
- struct work_struct mcast_task;
+ struct work_struct rx_mode_task;
struct work_struct mac_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
@@ -513,6 +525,7 @@ struct mlx4_en_priv {
bool wol;
struct device *ddev;
int base_tx_qpn;
+ struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
@@ -532,8 +545,18 @@ enum mlx4_en_wol {
MLX4_EN_WOL_ENABLED = (1ULL << 62),
};
+struct mlx4_mac_entry {
+ struct hlist_node hlist;
+ unsigned char mac[ETH_ALEN + 2];
+ u64 reg_id;
+ struct rcu_head rcu;
+};
+
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
+void mlx4_en_update_loopback_state(struct net_device *dev,
+ netdev_features_t features);
+
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 4c51b05efa2..719ead15e49 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -74,87 +74,6 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
- u64 mac, int *qpn, u64 *reg_id)
-{
- __be64 be_mac;
- int err;
-
- mac &= MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
-
- switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_B0: {
- struct mlx4_qp qp;
- u8 gid[16] = {0};
-
- qp.qpn = *qpn;
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
-
- err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
- break;
- }
- case MLX4_STEERING_MODE_DEVICE_MANAGED: {
- struct mlx4_spec_list spec_eth = { {NULL} };
- __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
-
- struct mlx4_net_trans_rule rule = {
- .queue_mode = MLX4_NET_TRANS_Q_FIFO,
- .exclusive = 0,
- .allow_loopback = 1,
- .promisc_mode = MLX4_FS_PROMISC_NONE,
- .priority = MLX4_DOMAIN_NIC,
- };
-
- rule.port = port;
- rule.qpn = *qpn;
- INIT_LIST_HEAD(&rule.list);
-
- spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
- memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
- list_add_tail(&spec_eth.list, &rule.list);
-
- err = mlx4_flow_attach(dev, &rule, reg_id);
- break;
- }
- default:
- return -EINVAL;
- }
- if (err)
- mlx4_warn(dev, "Failed Attaching Unicast\n");
-
- return err;
-}
-
-static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn, u64 reg_id)
-{
- switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_B0: {
- struct mlx4_qp qp;
- u8 gid[16] = {0};
- __be64 be_mac;
-
- qp.qpn = qpn;
- mac &= MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
-
- mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
- break;
- }
- case MLX4_STEERING_MODE_DEVICE_MANAGED: {
- mlx4_flow_detach(dev, reg_id);
- break;
- }
- default:
- mlx4_err(dev, "Invalid steering mode.\n");
- }
-}
-
static int validate_index(struct mlx4_dev *dev,
struct mlx4_mac_table *table, int index)
{
@@ -181,92 +100,6 @@ static int find_index(struct mlx4_dev *dev,
return -EINVAL;
}
-int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_entry *entry;
- int index = 0;
- int err = 0;
- u64 reg_id;
-
- mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
- (unsigned long long) mac);
- index = mlx4_register_mac(dev, port, mac);
- if (index < 0) {
- err = index;
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
- (unsigned long long) mac);
- return err;
- }
-
- if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- *qpn = info->base_qpn + index;
- return 0;
- }
-
- err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
- mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
- if (err) {
- mlx4_err(dev, "Failed to reserve qp for mac registration\n");
- goto qp_err;
- }
-
- err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
- if (err)
- goto steer_err;
-
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry) {
- err = -ENOMEM;
- goto alloc_err;
- }
- entry->mac = mac;
- entry->reg_id = reg_id;
- err = radix_tree_insert(&info->mac_tree, *qpn, entry);
- if (err)
- goto insert_err;
- return 0;
-
-insert_err:
- kfree(entry);
-
-alloc_err:
- mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
-
-steer_err:
- mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
- mlx4_unregister_mac(dev, port, mac);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
-
-void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_entry *entry;
-
- mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
- (unsigned long long) mac);
- mlx4_unregister_mac(dev, port, mac);
-
- if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (entry) {
- mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
- " qpn %d\n", port,
- (unsigned long long) mac, qpn);
- mlx4_uc_steer_release(dev, port, entry->mac,
- qpn, entry->reg_id);
- mlx4_qp_release_range(dev, qpn, 1);
- radix_tree_delete(&info->mac_tree, qpn);
- kfree(entry);
- }
- }
-}
-EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
-
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
__be64 *entries)
{
@@ -359,6 +192,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
+int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
+{
+ return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ (port - 1) * (1 << dev->caps.log_num_macs);
+}
+EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
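A quick worked example for the helper above (both inputs are illustrative): with reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] == 0x200 and log_num_macs == 7, port 1 gets base QPN 0x200 and port 2 gets 0x200 + (1 << 7) = 0x280. In MLX4_STEERING_MODE_A0 the per-MAC QP is then that base plus the MAC table index returned by mlx4_register_mac(), as used by mlx4_en_get_qp() earlier in this patch.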
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
@@ -397,29 +236,13 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- struct mlx4_mac_entry *entry;
int index = qpn - info->base_qpn;
int err = 0;
- if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (!entry)
- return -EINVAL;
- mlx4_uc_steer_release(dev, port, entry->mac,
- qpn, entry->reg_id);
- mlx4_unregister_mac(dev, port, entry->mac);
- entry->mac = new_mac;
- entry->reg_id = 0;
- mlx4_register_mac(dev, port, new_mac);
- err = mlx4_uc_steer_add(dev, port, entry->mac,
- &qpn, &entry->reg_id);
- return err;
- }
-
/* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex);
@@ -439,7 +262,7 @@ out:
mutex_unlock(&table->mutex);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index db50598006e..4782dcfde73 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -201,7 +201,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
struct netxen_cmd_buffer *cmd_buf_arr;
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
if (tx_ring == NULL)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 457ca8ec9e7..61b594c6d9d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -1442,7 +1442,9 @@ int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
/* Functions from qlcnic_ethtool.c */
-int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
+int qlcnic_check_loopback_buff(unsigned char *, u8 []);
+int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
+int qlcnic_loopback_test(struct net_device *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 65233c84653..f7b39d1067f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include "qlcnic.h"
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
@@ -5,13 +12,6 @@
#include <linux/interrupt.h>
#define QLCNIC_MAX_TX_QUEUES 1
-
-#define QLCNIC_MBX_RSP(reg) LSW(reg)
-#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF)
-#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F)
-#define QLCNIC_MBX_HOST(ahw, i) ((ahw)->pci_base0 + ((i) * 4))
-#define QLCNIC_MBX_FW(ahw, i) ((ahw)->pci_base0 + 0x800 + ((i) * 4))
-
#define RSS_HASHTYPE_IP_TCP 0x3
/* status descriptor mailbox data
@@ -257,8 +257,6 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.config_intr_coal = qlcnic_83xx_config_intr_coal,
.config_rss = qlcnic_83xx_config_rss,
.config_hw_lro = qlcnic_83xx_config_hw_lro,
- .config_loopback = qlcnic_83xx_set_lb_mode,
- .clear_loopback = qlcnic_83xx_clear_lb_mode,
.config_promisc_mode = qlcnic_83xx_nic_set_promisc,
.change_l2_filter = qlcnic_83xx_change_l2_filter,
.get_board_info = qlcnic_83xx_get_port_info,
@@ -666,6 +664,21 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
pr_info("\n");
}
+/* Check the mailbox response code of a MAC command */
+static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+{
+ u32 fw_data;
+ u8 mac_cmd_rcode;
+
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
+ return QLCNIC_RCODE_SUCCESS;
+ return 1;
+}
+
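Purely to illustrate the mailbox register layout implied by the QLCNIC_MBX_* macros deleted from this file above (they are still used below, so presumably they now live in a shared header), decoding an arbitrary fw_data value of 0x04020086 gives:

	QLCNIC_MBX_RSP(0x04020086)      = 0x0086 (low 16 bits: echoed command opcode)
	QLCNIC_MBX_NUM_REGS(0x04020086) = 2      (bits 16-24: number of response registers)
	QLCNIC_MBX_STATUS(0x04020086)   = 2      (bits 25-31: completion status, compared against QLCNIC_MBX_RSP_OK)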
static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
{
u32 data;
@@ -688,8 +701,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
{
int i;
u16 opcode;
- u8 mbx_err_code, mac_cmd_rcode;
- u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, temp, fw[8];
+ u8 mbx_err_code;
+ u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
struct qlcnic_hardware_context *ahw = adapter->ahw;
opcode = LSW(cmd->req.arg[0]);
@@ -724,79 +737,44 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
poll:
rsp = qlcnic_83xx_mbx_poll(adapter);
- /* Get the FW response data */
- fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
- mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
- rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
- opcode = QLCNIC_MBX_RSP(fw_data);
-
if (rsp != QLCNIC_RCODE_TIMEOUT) {
- if (opcode == QLCNIC_MBX_LINK_EVENT) {
- for (i = 0; i < rsp_num; i++) {
- temp = readl(QLCNIC_MBX_FW(ahw, i));
- fw[i] = temp;
- }
- qlcnic_83xx_handle_link_aen(adapter, fw);
- /* clear fw mbx control register */
- QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
- if (mbx_val)
- goto poll;
- } else if (opcode == QLCNIC_MBX_COMP_EVENT) {
- for (i = 0; i < rsp_num; i++) {
- temp = readl(QLCNIC_MBX_FW(ahw, i));
- fw[i] = temp;
- }
- qlcnic_83xx_handle_idc_comp_aen(adapter, fw);
- /* clear fw mbx control register */
- QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
- if (mbx_val)
- goto poll;
- } else if (opcode == QLCNIC_MBX_REQUEST_EVENT) {
- /* IDC Request Notification */
- for (i = 0; i < rsp_num; i++) {
- temp = readl(QLCNIC_MBX_FW(ahw, i));
- fw[i] = temp;
- }
- for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++) {
- temp = QLCNIC_MBX_RSP(fw[i]);
- adapter->ahw->mbox_aen[i] = temp;
- }
- queue_delayed_work(adapter->qlcnic_wq,
- &adapter->idc_aen_work, 0);
- /* clear fw mbx control register */
- QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+ /* Get the FW response data */
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
+ qlcnic_83xx_process_aen(adapter);
mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
if (mbx_val)
goto poll;
- } else if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
- (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
- qlcnic_83xx_get_mbx_data(adapter, cmd);
+ }
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
+ opcode = QLCNIC_MBX_RSP(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
rsp = QLCNIC_RCODE_SUCCESS;
- } else {
- qlcnic_83xx_get_mbx_data(adapter, cmd);
+ break;
+ default:
if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
- fw_data = readl(QLCNIC_MBX_FW(ahw, 2));
- mac_cmd_rcode = (u8)fw_data;
- if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
- mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
- mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
- rsp = QLCNIC_RCODE_SUCCESS;
+ rsp = qlcnic_83xx_mac_rcode(adapter);
+ if (!rsp)
goto out;
- }
}
- dev_info(&adapter->pdev->dev,
- "MBX command 0x%x failed with err:0x%x\n",
- opcode, mbx_err_code);
+ dev_err(&adapter->pdev->dev,
+ "MBX command 0x%x failed with err:0x%x\n",
+ opcode, mbx_err_code);
rsp = mbx_err_code;
qlcnic_dump_mbx(adapter, cmd);
+ break;
}
- } else {
- dev_info(&adapter->pdev->dev,
- "MBX command 0x%x timed out\n", opcode);
- qlcnic_dump_mbx(adapter, cmd);
+ goto out;
}
+
+ dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
+ QLCNIC_MBX_RSP(mbx_cmd));
+ rsp = QLCNIC_RCODE_TIMEOUT;
out:
/* clear fw mbx control register */
QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
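
The decode of the firmware response word above relies on the QLCNIC_MBX_RSP/QLCNIC_MBX_NUM_REGS/QLCNIC_MBX_STATUS macros that this patch moves into qlcnic_83xx_hw.h. As a rough illustration only, the following stand-alone C sketch mirrors that bit layout (low 16 bits opcode, 9-bit register count, 7-bit status); the sample response word is invented for the example and is not taken from hardware.

#include <stdio.h>
#include <stdint.h>

/* Field layout as defined by the QLCNIC_MBX_* macros in qlcnic_83xx_hw.h */
#define MBX_RSP(reg)       ((uint32_t)(reg) & 0xFFFF)     /* opcode echoed by FW */
#define MBX_NUM_REGS(reg)  (((reg) >> 16) & 0x1FF)        /* response register count */
#define MBX_STATUS(reg)    (((reg) >> 25) & 0x7F)         /* completion status code */

int main(void)
{
	uint32_t fw_data = 0x02040023;	/* hypothetical response word */

	printf("opcode  : 0x%x\n", MBX_RSP(fw_data));
	printf("num regs: %u\n", (unsigned)MBX_NUM_REGS(fw_data));
	printf("status  : 0x%x\n", (unsigned)MBX_STATUS(fw_data));
	return 0;
}
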
@@ -868,20 +846,10 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
- u32 mask, resp, event[QLC_83XX_MBX_AEN_CNT];
+ u32 event[QLC_83XX_MBX_AEN_CNT];
int i;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- if (!spin_trylock(&ahw->mbx_lock)) {
- mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
- writel(0, adapter->ahw->pci_base0 + mask);
- return;
- }
- resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
-
- if (!(resp & QLCNIC_SET_OWNER))
- goto out;
-
for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
event[i] = readl(QLCNIC_MBX_FW(ahw, i));
@@ -916,10 +884,6 @@ void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
}
QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
-out:
- mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
- writel(0, adapter->ahw->pci_base0 + mask);
- spin_unlock(&ahw->mbx_lock);
}
static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
@@ -1166,6 +1130,100 @@ out:
return err;
}
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+ u8 ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->max_sds_rings = 1;
+ adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
+
+ ret = qlcnic_attach(adapter);
+ if (ret) {
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ /* disable and free mailbox interrupt */
+ qlcnic_83xx_free_mbx_intr(adapter);
+ adapter->ahw->loopback_state = 0;
+ adapter->ahw->hw_ops->setup_link_event(adapter, 1);
+ }
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
+ int max_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring, err;
+
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ writel(1, sds_ring->crb_intr_mask);
+ }
+ }
+
+ qlcnic_fw_destroy_ctx(adapter);
+ qlcnic_detach(adapter);
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to setup mbx interrupt\n",
+ __func__);
+ goto out;
+ }
+ }
+ adapter->ahw->diag_test = 0;
+ adapter->max_sds_rings = max_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ goto out;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+out:
+ netif_device_attach(netdev);
+}
+
int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
u32 beacon)
{
@@ -1232,10 +1290,10 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
if (enable) {
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
- cmd.req.arg[1] = 1 | BIT_0;
+ cmd.req.arg[1] = BIT_0 | BIT_31;
} else {
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC);
- cmd.req.arg[1] = 0 | BIT_0;
+ cmd.req.arg[1] = BIT_0 | BIT_31;
}
status = qlcnic_issue_cmd(adapter, &cmd);
if (status)
@@ -1313,6 +1371,57 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
return err;
}
+int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
+
+ QLCDB(adapter, DRV, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(&adapter->pdev->dev,
+ "Loopback test not supported for non privilege function\n");
+ return ret;
+ }
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto fail_diag_alloc;
+
+ ret = qlcnic_83xx_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_diag_res;
+
+ /* Poll for link up event before running traffic */
+ do {
+ msleep(500);
+ qlcnic_83xx_process_aen(adapter);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ dev_info(&adapter->pdev->dev,
+ "Firmware didn't sent link up event to loopback request\n");
+ ret = -QLCNIC_FW_NOT_RESPOND;
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ goto free_diag_res;
+ }
+ } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+
+ ret = qlcnic_do_lb_test(adapter, mode);
+
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+
+free_diag_res:
+ qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+
+fail_diag_alloc:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
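
The loopback-test loop above is a bounded wait: the driver re-polls the AEN path every 500 ms and gives up after QLCNIC_ILB_MAX_RCV_LOOP passes rather than blocking forever. A minimal user-space sketch of the same pattern, with an invented loop cap and a stubbed link_ready() condition standing in for the linkup/has_link_events check:

#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>

#define MAX_RCV_LOOP 20		/* assumed cap, analogous to QLCNIC_ILB_MAX_RCV_LOOP */

/* Stand-in for "link is up and link events are enabled". */
static bool link_ready(int iteration)
{
	return iteration >= 3;	/* pretend the event arrives on the third poll */
}

int main(void)
{
	int loop = 0;

	/* Poll for the link-up event before running traffic, giving up
	 * after MAX_RCV_LOOP attempts, as the driver does. */
	do {
		usleep(500 * 1000);	/* 500 ms between polls */
		if (loop++ > MAX_RCV_LOOP) {
			fprintf(stderr, "no link-up event, aborting test\n");
			return 1;
		}
	} while (!link_ready(loop));

	printf("link up after %d polls, traffic test can start\n", loop);
	return 0;
}
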
@@ -1341,13 +1450,15 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return status;
}
- /* Wait until firmware send IDC Completion AEN */
+ /* Wait for Link and IDC Completion AEN */
do {
msleep(300);
+ qlcnic_83xx_process_aen(adapter);
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
dev_err(&adapter->pdev->dev,
"FW did not generate IDC completion AEN\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
return -EIO;
}
} while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
@@ -1379,9 +1490,10 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return status;
}
- /* Wait until firmware send IDC Completion AEN */
+ /* Wait for Link and IDC Completion AEN */
do {
msleep(300);
+ qlcnic_83xx_process_aen(adapter);
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
dev_err(&adapter->pdev->dev,
"Firmware didn't sent IDC completion AEN\n");
@@ -1613,7 +1725,21 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
struct qlcnic_adapter *adapter = data;
- qlcnic_83xx_process_aen(adapter);
+ unsigned long flags;
+ u32 mask, resp, event;
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ qlcnic_83xx_process_aen(adapter);
+out:
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+
return IRQ_HANDLED;
}
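
The reworked interrupt handler above does all mailbox peeking under mbx_lock: it bails out unless firmware owns the mailbox, only calls the AEN processor when the async-event bit is set in word 0, and always unmasks the interrupt before releasing the lock. The sketch below models that shape in user space with a pthread mutex; the SET_OWNER bit value and the fake register contents are assumptions made for the example, and only ASYNC_EVENT mirrors the BIT_15 definition added to qlcnic_hw.h.

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

#define SET_OWNER    (1u << 0)	/* assumed bit, stands in for QLCNIC_SET_OWNER */
#define ASYNC_EVENT  (1u << 15)	/* mirrors QLCNIC_MBX_ASYNC_EVENT (BIT_15) */

static pthread_mutex_t mbx_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t fw_mbx_ctrl  = SET_OWNER;		/* fake "registers" */
static uint32_t fw_mbx_word0 = ASYNC_EVENT | 0x8086;

static void process_aen(void)
{
	printf("async event 0x%x handled\n", fw_mbx_word0 & 0xFFFF);
}

/* Mirrors the shape of qlcnic_83xx_handle_aen(): touch the mailbox only
 * under the lock, process only when firmware owns the mailbox and the
 * async-event bit is set, always re-enable the interrupt on the way out. */
static void handle_aen(void)
{
	pthread_mutex_lock(&mbx_lock);
	if ((fw_mbx_ctrl & SET_OWNER) && (fw_mbx_word0 & ASYNC_EVENT))
		process_aen();
	/* the interrupt unmask (writel(0, ...) in the driver) would go here */
	pthread_mutex_unlock(&mbx_lock);
}

int main(void)
{
	handle_aen();
	return 0;
}
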
@@ -2635,25 +2761,37 @@ int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
return i;
}
-int qlcnic_83xx_interrupt_test(struct qlcnic_adapter *adapter,
- struct qlcnic_cmd_args *cmd)
+int qlcnic_83xx_interrupt_test(struct net_device *netdev)
{
- u8 val;
- int ret;
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
u32 data;
u16 intrpt_id, id;
+ u8 val;
+ int ret, max_sds_rings = adapter->max_sds_rings;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto fail_diag_irq;
+
+ ahw->diag_cnt = 0;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
if (adapter->flags & QLCNIC_MSIX_ENABLED)
- intrpt_id = adapter->ahw->intr_tbl[0].id;
+ intrpt_id = ahw->intr_tbl[0].id;
else
- intrpt_id = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_ID);
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
- cmd->req.arg[1] = 1;
- cmd->req.arg[2] = intrpt_id;
- cmd->req.arg[3] = BIT_0;
+ cmd.req.arg[1] = 1;
+ cmd.req.arg[2] = intrpt_id;
+ cmd.req.arg[3] = BIT_0;
- ret = qlcnic_issue_cmd(adapter, cmd);
- data = cmd->rsp.arg[2];
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ data = cmd.rsp.arg[2];
id = LSW(data);
val = LSB(MSW(data));
if (id != intrpt_id)
@@ -2661,9 +2799,21 @@ int qlcnic_83xx_interrupt_test(struct qlcnic_adapter *adapter,
"Interrupt generated: 0x%x, requested:0x%x\n",
id, intrpt_id);
if (val)
- dev_info(&adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"Interrupt test error: 0x%x\n", val);
+ if (ret)
+ goto done;
+
+ msleep(20);
+ ret = !ahw->diag_cnt;
+
+done:
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+fail_diag_irq:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 2b44eb14d52..f60e28ab994 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#ifndef __QLCNIC_83XX_HW_H
#define __QLCNIC_83XX_HW_H
@@ -130,9 +137,6 @@ struct qlc_83xx_reset {
#define QLC_83XX_IDC_MINOR_VERSION 0
#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
-/* Mailbox process AEN count */
-#define QLC_83XX_MBX_AEN_CNT 5
-
struct qlcnic_adapter;
struct qlc_83xx_idc {
int (*state_entry) (struct qlcnic_adapter *);
@@ -149,6 +153,12 @@ struct qlc_83xx_idc {
char **name;
};
+#define QLCNIC_MBX_RSP(reg) LSW(reg)
+#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF)
+#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F)
+#define QLCNIC_MBX_HOST(ahw, i) ((ahw)->pci_base0 + ((i) * 4))
+#define QLCNIC_MBX_FW(ahw, i) ((ahw)->pci_base0 + 0x800 + ((i) * 4))
+
/* Mailbox process AEN count */
#define QLC_83XX_IDC_COMP_AEN 3
#define QLC_83XX_MBX_AEN_CNT 5
@@ -418,7 +428,7 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *);
int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
-int qlcnic_83xx_interrupt_test(struct qlcnic_adapter *,
- struct qlcnic_cmd_args *);
+int qlcnic_83xx_loopback_test(struct net_device *, u8);
+int qlcnic_83xx_interrupt_test(struct net_device *);
int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 8163e5b338e..c53832b02b3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include "qlcnic.h"
#include "qlcnic_hw.h"
@@ -578,6 +585,9 @@ static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
{
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+
qlcnic_83xx_enable_mbx_intrpt(adapter);
if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
if (qlcnic_83xx_config_intrpt(adapter, 1)) {
@@ -2018,6 +2028,9 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
qlcnic_83xx_clear_function_resources(adapter);
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+
if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
qlcnic_83xx_read_flash_mfg_id(adapter);
@@ -2034,9 +2047,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
-
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index b3ef33a6722..b0c3de9ede0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include "qlcnic.h"
#include "qlcnic_hw.h"
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 7372964d3a7..4a3bd64b7fb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index f65fd7b6f9a..5641f8ec49a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -823,38 +823,36 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
static int qlcnic_irq_test(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_sds_rings = adapter->max_sds_rings;
- int ret;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
+ int ret, max_sds_rings = adapter->max_sds_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_interrupt_test(netdev);
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
if (ret)
- goto clear_it;
+ goto clear_diag_irq;
- adapter->ahw->diag_cnt = 0;
+ ahw->diag_cnt = 0;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
- if (qlcnic_83xx_check(adapter)) {
- ret = qlcnic_83xx_interrupt_test(adapter, &cmd);
- } else {
- cmd.req.arg[1] = adapter->ahw->pci_func;
- ret = qlcnic_issue_cmd(adapter, &cmd);
- }
-
+ cmd.req.arg[1] = ahw->pci_func;
+ ret = qlcnic_issue_cmd(adapter, &cmd);
if (ret)
goto done;
usleep_range(1000, 12000);
- ret = !adapter->ahw->diag_cnt;
+ ret = !ahw->diag_cnt;
done:
qlcnic_free_mbx_args(&cmd);
qlcnic_diag_free_res(netdev, max_sds_rings);
-clear_it:
+clear_diag_irq:
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
@@ -883,7 +881,7 @@ int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
}
-static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
@@ -925,7 +923,7 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
-static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int max_sds_rings = adapter->max_sds_rings;
@@ -935,13 +933,14 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
int ret;
if (qlcnic_83xx_check(adapter))
- goto skip_cap;
+ return qlcnic_83xx_loopback_test(netdev, mode);
+
if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
dev_info(&adapter->pdev->dev,
"Firmware do not support loopback test\n");
return -EOPNOTSUPP;
}
-skip_cap:
+
dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n",
mode == QLCNIC_ILB_MODE ? "internal" : "external");
if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
@@ -962,9 +961,6 @@ skip_cap:
if (ret)
goto free_res;
- if (qlcnic_83xx_check(adapter))
- goto skip_fw_msg;
-
ahw->diag_cnt = 0;
do {
msleep(500);
@@ -979,21 +975,9 @@ skip_cap:
goto free_res;
}
} while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state));
-skip_fw_msg:
- if (qlcnic_83xx_check(adapter)) {
- /* wait until firmware report link up before running traffic */
- loop = 0;
- do {
- msleep(500);
- if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
- dev_info(&adapter->pdev->dev,
- "No linkup event after LB req\n");
- ret = -QLCNIC_FW_NOT_RESPOND;
- goto free_res;
- }
- } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
- }
+
ret = qlcnic_do_lb_test(adapter, mode);
+
qlcnic_clear_lb_mode(adapter, mode);
free_res:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 74f77112298..44197ca1456 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 4ee92b254fc..51716ab8739 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 9673e2bcff0..5b8749eda11 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#ifndef __QLCNIC_HW_H
#define __QLCNIC_HW_H
@@ -128,6 +135,7 @@ struct qlcnic_mailbox_metadata {
#define QLCNIC_MBX_RSP_OK 1
#define QLCNIC_MBX_PORT_RSP_OK 0x1a
+#define QLCNIC_MBX_ASYNC_EVENT BIT_15
struct qlcnic_pci_info;
struct qlcnic_info;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 10ad25d7aa0..d28336fc65a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index d00f6283274..93839f8a2f0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
@@ -1036,8 +1043,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
th->seq = htonl(seq_number);
length = skb->len;
- if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid);
@@ -1546,6 +1558,24 @@ skip:
return count;
}
+static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+{
+ unsigned long flags;
+ u32 mask, resp, event;
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ qlcnic_83xx_process_aen(adapter);
+out:
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+}
+
static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
int tx_complete;
@@ -1560,7 +1590,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
tx_ring = adapter->tx_ring;
if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
+ qlcnic_83xx_poll_process_aen(adapter);
tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index fb0255a90ce..b9531683158 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -247,8 +247,8 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
-static int qlcnic_fdb_del(struct ndmsg *ndm, struct net_device *netdev,
- const unsigned char *addr)
+static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev, const unsigned char *addr)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = -EOPNOTSUPP;
@@ -455,12 +455,13 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
if (num_msix) {
dev_info(&pdev->dev,
- "Trying %d MSI-X interrupt vectors\n",
+ "Trying to allocate %d MSI-X interrupt vectors\n",
num_msix);
goto enable_msix;
}
} else {
- dev_info(&pdev->dev, "Failed to get %d vectors\n",
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X interrupt vectors\n",
num_msix);
}
}
@@ -1174,8 +1175,6 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
qlcnic_dev_set_npar_ready(adapter);
- if (qlcnic_83xx_check(adapter))
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
return err;
}
@@ -1505,10 +1504,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- if (qlcnic_83xx_check(adapter))
- writel(1, sds_ring->crb_intr_mask);
- else
- qlcnic_disable_int(sds_ring);
+ qlcnic_disable_int(sds_ring);
}
}
@@ -1601,10 +1597,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- if (qlcnic_82xx_check(adapter))
- qlcnic_enable_int(sds_ring);
- else
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_int(sds_ring);
}
}
@@ -2025,6 +2018,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
if (adapter->flags & QLCNIC_MSIX_ENABLED)
qlcnic_83xx_config_intrpt(adapter, 0);
qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
}
qlcnic_detach(adapter);
@@ -2182,10 +2177,6 @@ static int qlcnic_close(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
__qlcnic_down(adapter, netdev);
- if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
- cancel_delayed_work_sync(&adapter->idc_aen_work);
- }
return 0;
}
@@ -3063,6 +3054,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
}
if (qlcnic_83xx_check(adapter)) {
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
err = qlcnic_83xx_setup_mbx_intr(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
@@ -3117,6 +3110,8 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
if (adapter->flags & QLCNIC_MSIX_ENABLED)
qlcnic_83xx_config_intrpt(adapter, 0);
qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
}
qlcnic_detach(adapter);
@@ -3224,6 +3219,8 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
}
if (qlcnic_83xx_check(adapter)) {
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
err = qlcnic_83xx_setup_mbx_intr(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 8dbc8e76bc7..abbd22c814a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1,3 +1,9 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
#include "qlcnic.h"
#include "qlcnic_hdr.h"
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 504506349ac..987fb6f8adc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 325627e287f..b13ab544a7e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2920,14 +2920,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
/*
* Allocate small buffer queue control blocks.
*/
- rx_ring->sbq =
- kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->sbq == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Small buffer queue control block allocation failed.\n");
+ rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->sbq == NULL)
goto err_mem;
- }
ql_init_sbq_ring(qdev, rx_ring);
}
@@ -2948,14 +2945,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
/*
* Allocate large buffer queue control blocks.
*/
- rx_ring->lbq =
- kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->lbq == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Large buffer queue control block allocation failed.\n");
+ rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->lbq == NULL)
goto err_mem;
- }
ql_init_lbq_ring(qdev, rx_ring);
}
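
The switch from kmalloc(n * size, ...) to kmalloc_array(n, size, ...) above is about overflow safety: kmalloc_array refuses allocations whose count-times-size product would wrap. A user-space analogue of that check, with an invented descriptor type standing in for struct bq_desc, might look like:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* User-space analogue of kmalloc_array(): refuse allocations whose
 * element count times element size would overflow size_t, which is why
 * the driver moved away from the open-coded kmalloc(n * size). */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* would overflow */
	return malloc(n * size);
}

struct bq_desc_stub { void *addr; uint32_t len; };	/* stand-in descriptor */

int main(void)
{
	void *ring = alloc_array(1024, sizeof(struct bq_desc_stub));

	if (!ring) {
		fprintf(stderr, "ring allocation failed\n");
		return 1;
	}
	printf("allocated %zu bytes\n", 1024 * sizeof(struct bq_desc_stub));
	free(ring);
	return 0;
}
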
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4208f28d5d1..8900398ba10 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -450,7 +450,6 @@ enum rtl8168_registers {
#define PWM_EN (1 << 22)
#define RXDV_GATED_EN (1 << 19)
#define EARLY_TALLY_EN (1 << 16)
-#define FORCE_CLK (1 << 15) /* force clock request */
};
enum rtl_register_content {
@@ -514,7 +513,6 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
- ClkReqEn = (1 << 7), /* Clock Request Enable */
MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -535,7 +533,6 @@ enum rtl_register_content {
Spi_en = (1 << 3),
LanWake = (1 << 1), /* LanWake enable/disable */
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
- ASPM_en = (1 << 0), /* ASPM enable */
/* TBICSR p.28 */
TBIReset = 0x80000000,
@@ -684,7 +681,6 @@ enum features {
RTL_FEATURE_WOL = (1 << 0),
RTL_FEATURE_MSI = (1 << 1),
RTL_FEATURE_GMII = (1 << 2),
- RTL_FEATURE_FW_LOADED = (1 << 3),
};
struct rtl8169_counters {
@@ -2388,10 +2384,8 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
struct rtl_fw *rtl_fw = tp->rtl_fw;
/* TODO: release firmware once rtl_phy_write_fw signals failures. */
- if (!IS_ERR_OR_NULL(rtl_fw)) {
+ if (!IS_ERR_OR_NULL(rtl_fw))
rtl_phy_write_fw(tp, rtl_fw);
- tp->features |= RTL_FEATURE_FW_LOADED;
- }
}
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2402,31 +2396,6 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
rtl_apply_firmware(tp);
}
-static void r810x_aldps_disable(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
- msleep(100);
-}
-
-static void r810x_aldps_enable(struct rtl8169_private *tp)
-{
- if (!(tp->features & RTL_FEATURE_FW_LOADED))
- return;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x8310);
-}
-
-static void r8168_aldps_enable_1(struct rtl8169_private *tp)
-{
- if (!(tp->features & RTL_FEATURE_FW_LOADED))
- return;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
-}
-
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3217,8 +3186,6 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
rtl_writephy(tp, 0x1f, 0x0000);
- r8168_aldps_enable_1(tp);
-
/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
@@ -3293,8 +3260,6 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
-
- r8168_aldps_enable_1(tp);
}
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3302,8 +3267,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
rtl8168f_hw_phy_config(tp);
-
- r8168_aldps_enable_1(tp);
}
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3401,8 +3364,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
rtl_writephy(tp, 0x1f, 0x0000);
-
- r8168_aldps_enable_1(tp);
}
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3488,19 +3449,21 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- r810x_aldps_disable(tp);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
rtl_apply_firmware(tp);
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
-
- r810x_aldps_enable(tp);
}
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
/* Disable ALDPS before setting firmware */
- r810x_aldps_disable(tp);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(20);
rtl_apply_firmware(tp);
@@ -3510,8 +3473,6 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
rtl_writephy(tp, 0x1f, 0x0000);
-
- r810x_aldps_enable(tp);
}
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3524,7 +3485,9 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- r810x_aldps_disable(tp);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
rtl_apply_firmware(tp);
@@ -3532,8 +3495,6 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
- r810x_aldps_enable(tp);
}
static void rtl_hw_phy_config(struct net_device *dev)
@@ -5050,6 +5011,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, EarlySize);
+ rtl_disable_clock_request(pdev);
+
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
@@ -5058,8 +5021,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5084,12 +5046,13 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, EarlySize);
+ rtl_disable_clock_request(pdev);
+
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
- RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5146,10 +5109,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
+ RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5365,9 +5326,6 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
- RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
@@ -5393,9 +5351,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
- RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -5417,10 +5372,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
- RTL_W32(MISC,
- (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index ecfb43614d7..d457fa2d750 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1250,12 +1250,11 @@ static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd)
BUG_ON(!pd->tx_ring);
- pd->tx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
- TX_RING_SIZE), GFP_KERNEL);
- if (!pd->tx_buffers) {
- smsc_warn(IFUP, "Failed to allocated tx_buffers");
+ pd->tx_buffers = kmalloc_array(TX_RING_SIZE,
+ sizeof(struct smsc9420_ring_info),
+ GFP_KERNEL);
+ if (!pd->tx_buffers)
return -ENOMEM;
- }
/* Initialize the TX Ring */
for (i = 0; i < TX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fd4d659f0fa..39c6c552463 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -69,7 +69,7 @@
#undef STMMAC_XMIT_DEBUG
/*#define STMMAC_XMIT_DEBUG*/
-#ifdef STMMAC_TX_DEBUG
+#ifdef STMMAC_XMIT_DEBUG
#define TX_DBG(fmt, args...) printk(fmt, ## args)
#else
#define TX_DBG(fmt, args...) do { } while (0)
@@ -2254,7 +2254,7 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "pause:", 6)) {
if (kstrtoint(opt + 6, 0, &pause))
goto err;
- } else if (!strncmp(opt, "eee_timer:", 6)) {
+ } else if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0376a5e6b2b..0b9829fe3ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -188,8 +188,6 @@ int stmmac_mdio_register(struct net_device *ndev)
goto bus_register_fail;
}
- priv->mii = new_bus;
-
found = 0;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
struct phy_device *phydev = new_bus->phy_map[addr];
@@ -237,8 +235,14 @@ int stmmac_mdio_register(struct net_device *ndev)
}
}
- if (!found)
+ if (!found) {
pr_warning("%s: No PHY found\n", ndev->name);
+ mdiobus_unregister(new_bus);
+ mdiobus_free(new_bus);
+ return -ENODEV;
+ }
+
+ priv->mii = new_bus;
return 0;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9e63bff124f..7e93df6585e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -32,6 +32,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+#include <linux/if_vlan.h>
#include <linux/platform_data/cpsw.h>
@@ -118,6 +119,13 @@ do { \
#define TX_PRIORITY_MAPPING 0x33221100
#define CPDMA_TX_PRIORITY_MAP 0x76543210
+#define CPSW_VLAN_AWARE BIT(1)
+#define CPSW_ALE_VLAN_AWARE 1
+
+#define CPSW_FIFO_NORMAL_MODE (0 << 15)
+#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
+#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
+
#define cpsw_enable_irq(priv) \
do { \
u32 i; \
@@ -250,7 +258,7 @@ struct cpsw_ss_regs {
struct cpsw_host_regs {
u32 max_blks;
u32 blk_cnt;
- u32 flow_thresh;
+ u32 tx_in_ctl;
u32 port_vlan;
u32 tx_pri_map;
u32 cpdma_tx_pri_map;
@@ -277,6 +285,9 @@ struct cpsw_slave {
u32 mac_control;
struct cpsw_slave_data *data;
struct phy_device *phy;
+ struct net_device *ndev;
+ u32 port_vlan;
+ u32 open_stat;
};
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
@@ -315,17 +326,65 @@ struct cpsw_priv {
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
- struct cpts cpts;
+ struct cpts *cpts;
+ u32 emac_port;
};
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
-#define for_each_slave(priv, func, arg...) \
- do { \
- int idx; \
- for (idx = 0; idx < (priv)->data.slaves; idx++) \
- (func)((priv)->slaves + idx, ##arg); \
+#define for_each_slave(priv, func, arg...) \
+ do { \
+ int idx; \
+ if (priv->data.dual_emac) \
+ (func)((priv)->slaves + priv->emac_port, ##arg);\
+ else \
+ for (idx = 0; idx < (priv)->data.slaves; idx++) \
+ (func)((priv)->slaves + idx, ##arg); \
+ } while (0)
+#define cpsw_get_slave_ndev(priv, __slave_no__) \
+ (priv->slaves[__slave_no__].ndev)
+#define cpsw_get_slave_priv(priv, __slave_no__) \
+ ((priv->slaves[__slave_no__].ndev) ? \
+ netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
+
+#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
+ do { \
+ if (!priv->data.dual_emac) \
+ break; \
+ if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
+ ndev = cpsw_get_slave_ndev(priv, 0); \
+ priv = netdev_priv(ndev); \
+ skb->dev = ndev; \
+ } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
+ ndev = cpsw_get_slave_ndev(priv, 1); \
+ priv = netdev_priv(ndev); \
+ skb->dev = ndev; \
+ } \
+ } while (0)
+#define cpsw_add_mcast(priv, addr) \
+ do { \
+ if (priv->data.dual_emac) { \
+ struct cpsw_slave *slave = priv->slaves + \
+ priv->emac_port; \
+ int slave_port = cpsw_get_slave_port(priv, \
+ slave->slave_num); \
+ cpsw_ale_add_mcast(priv->ale, addr, \
+ 1 << slave_port | 1 << priv->host_port, \
+ ALE_VLAN, slave->port_vlan, 0); \
+ } else { \
+ cpsw_ale_add_mcast(priv->ale, addr, \
+ ALE_ALL_PORTS << priv->host_port, \
+ 0, 0, 0); \
+ } \
} while (0)
+static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+{
+ if (priv->host_port == 0)
+ return slave_num + 1;
+ else
+ return slave_num;
+}
+
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -344,8 +403,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* program multicast address list into ALE register */
netdev_for_each_mc_addr(ha, ndev) {
- cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
- ALE_ALL_PORTS << priv->host_port, 0, 0);
+ cpsw_add_mcast(priv, (u8 *)ha->addr);
}
}
}
@@ -379,7 +437,7 @@ void cpsw_tx_handler(void *token, int len, int status)
*/
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
- cpts_tx_timestamp(&priv->cpts, skb);
+ cpts_tx_timestamp(priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
@@ -392,6 +450,8 @@ void cpsw_rx_handler(void *token, int len, int status)
struct cpsw_priv *priv = netdev_priv(ndev);
int ret = 0;
+ cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
+
/* free and bail if we are shutting down */
if (unlikely(!netif_running(ndev)) ||
unlikely(!netif_carrier_ok(ndev))) {
@@ -400,7 +460,7 @@ void cpsw_rx_handler(void *token, int len, int status)
}
if (likely(status >= 0)) {
skb_put(skb, len);
- cpts_rx_timestamp(&priv->cpts, skb);
+ cpts_rx_timestamp(priv->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
priv->stats.rx_bytes += len;
@@ -420,7 +480,7 @@ void cpsw_rx_handler(void *token, int len, int status)
return;
ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
}
WARN_ON(ret < 0);
}
@@ -433,37 +493,38 @@ static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
cpsw_intr_disable(priv);
cpsw_disable_irq(priv);
napi_schedule(&priv->napi);
+ } else {
+ priv = cpsw_get_slave_priv(priv, 1);
+ if (likely(priv) && likely(netif_running(priv->ndev))) {
+ cpsw_intr_disable(priv);
+ cpsw_disable_irq(priv);
+ napi_schedule(&priv->napi);
+ }
}
return IRQ_HANDLED;
}
-static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
-{
- if (priv->host_port == 0)
- return slave_num + 1;
- else
- return slave_num;
-}
-
static int cpsw_poll(struct napi_struct *napi, int budget)
{
struct cpsw_priv *priv = napi_to_priv(napi);
int num_tx, num_rx;
num_tx = cpdma_chan_process(priv->txch, 128);
- num_rx = cpdma_chan_process(priv->rxch, budget);
-
- if (num_rx || num_tx)
- cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
- num_rx, num_tx);
+ if (num_tx)
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
napi_complete(napi);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
cpsw_enable_irq(priv);
}
+ if (num_rx || num_tx)
+ cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
+ num_rx, num_tx);
+
return num_rx;
}
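
The reordered poll routine above acknowledges TX completions with a TX-only EOI as soon as they are processed, and defers the RX EOI and interrupt re-enable until the RX budget is not exhausted. The stand-alone sketch below models that control flow; the stubbed helpers and their return values are placeholders for cpdma_chan_process() and cpdma_ctlr_eoi(), not the real API.

#include <stdio.h>

/* Stub "hardware" helpers so the sketch compiles on its own. */
static int process_tx(int quota)  { (void)quota;  return 3; }	/* pretend 3 tx done   */
static int process_rx(int budget) { (void)budget; return 5; }	/* pretend 5 rx frames */
static void eoi_tx(void)          { printf("EOI: tx\n"); }
static void eoi_rx(void)          { printf("EOI: rx\n"); }
static void irq_enable(void)      { printf("irq re-enabled\n"); }

/* Shape of the reworked cpsw_poll(): ack tx completions first, then
 * receive up to the budget, and only finish the poll (re-enabling the
 * interrupt and acking rx) when the budget was not exhausted. */
static int poll(int budget)
{
	int num_tx, num_rx;

	num_tx = process_tx(128);
	if (num_tx)
		eoi_tx();

	num_rx = process_rx(budget);
	if (num_rx < budget) {
		irq_enable();
		eoi_rx();
	}
	printf("poll: %d rx, %d tx packets\n", num_rx, num_tx);
	return num_rx;
}

int main(void)
{
	poll(64);
	return 0;
}
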
@@ -562,6 +623,54 @@ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
leader + strlen(name), val);
}
+static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
+{
+ u32 i;
+ u32 usage_count = 0;
+
+ if (!priv->data.dual_emac)
+ return 0;
+
+ for (i = 0; i < priv->data.slaves; i++)
+ if (priv->slaves[i].open_stat)
+ usage_count++;
+
+ return usage_count;
+}
+
+static inline int cpsw_tx_packet_submit(struct net_device *ndev,
+ struct cpsw_priv *priv, struct sk_buff *skb)
+{
+ if (!priv->data.dual_emac)
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 0, GFP_KERNEL);
+
+ if (ndev == cpsw_get_slave_ndev(priv, 0))
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 1, GFP_KERNEL);
+ else
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 2, GFP_KERNEL);
+}
+
+static inline void cpsw_add_dual_emac_def_ale_entries(
+ struct cpsw_priv *priv, struct cpsw_slave *slave,
+ u32 slave_port)
+{
+ u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+
+ if (priv->version == CPSW_VERSION_1)
+ slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
+ else
+ slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
+ cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
+ port_mask, port_mask, 0);
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan, 0);
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, slave->port_vlan);
+}
+
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
char name[32];
@@ -591,8 +700,11 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_port = cpsw_get_slave_port(priv, slave->slave_num);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << slave_port, 0, ALE_MCAST_FWD_2);
+ if (priv->data.dual_emac)
+ cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
+ else
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
&cpsw_adjust_link, slave->data->phy_if);
@@ -607,14 +719,44 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
}
+static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
+{
+ const int vlan = priv->data.default_vlan;
+ const int port = priv->host_port;
+ u32 reg;
+ int i;
+
+ reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+
+ writel(vlan, &priv->host_port_regs->port_vlan);
+
+ for (i = 0; i < 2; i++)
+ slave_write(priv->slaves + i, vlan, reg);
+
+ cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
+ ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
+ (ALE_PORT_1 | ALE_PORT_2) << port);
+}
+
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
+ u32 control_reg;
+ u32 fifo_mode;
+
/* soft reset the controller and initialize ale */
soft_reset("cpsw", &priv->regs->soft_reset);
cpsw_ale_start(priv->ale);
/* switch to vlan unaware mode */
- cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
+ cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+ CPSW_ALE_VLAN_AWARE);
+ control_reg = readl(&priv->regs->control);
+ control_reg |= CPSW_VLAN_AWARE;
+ writel(control_reg, &priv->regs->control);
+ fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
+ CPSW_FIFO_NORMAL_MODE;
+ writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);
/* setup host port priority mapping */
__raw_writel(CPDMA_TX_PRIORITY_MAP,
@@ -624,9 +766,12 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
cpsw_ale_control_set(priv->ale, priv->host_port,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << priv->host_port, 0, ALE_MCAST_FWD_2);
+ if (!priv->data.dual_emac) {
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ 0, 0);
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+ }
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -635,7 +780,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
- cpsw_intr_disable(priv);
+ if (!cpsw_common_res_usage_state(priv))
+ cpsw_intr_disable(priv);
netif_carrier_off(ndev);
pm_runtime_get_sync(&priv->pdev->dev);
@@ -647,43 +793,55 @@ static int cpsw_ndo_open(struct net_device *ndev)
CPSW_RTL_VERSION(reg));
/* initialize host and slave ports */
- cpsw_init_host_port(priv);
+ if (!cpsw_common_res_usage_state(priv))
+ cpsw_init_host_port(priv);
for_each_slave(priv, cpsw_slave_open, priv);
- /* setup tx dma to fixed prio and zero offset */
- cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
- cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
+ /* Add default VLAN */
+ if (!priv->data.dual_emac)
+ cpsw_add_default_vlan(priv);
- /* disable priority elevation and enable statistics on all ports */
- __raw_writel(0, &priv->regs->ptype);
+ if (!cpsw_common_res_usage_state(priv)) {
+ /* setup tx dma to fixed prio and zero offset */
+ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
+ cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
- /* enable statistics collection only on the host port */
- __raw_writel(0x7, &priv->regs->stat_port_en);
+ /* disable priority elevation */
+ __raw_writel(0, &priv->regs->ptype);
- if (WARN_ON(!priv->data.rx_descs))
- priv->data.rx_descs = 128;
+ /* enable statistics collection on all ports */
+ __raw_writel(0x7, &priv->regs->stat_port_en);
- for (i = 0; i < priv->data.rx_descs; i++) {
- struct sk_buff *skb;
+ if (WARN_ON(!priv->data.rx_descs))
+ priv->data.rx_descs = 128;
- ret = -ENOMEM;
- skb = netdev_alloc_skb_ip_align(priv->ndev,
- priv->rx_packet_max);
- if (!skb)
- break;
- ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
- if (WARN_ON(ret < 0))
- break;
+ for (i = 0; i < priv->data.rx_descs; i++) {
+ struct sk_buff *skb;
+
+ ret = -ENOMEM;
+ skb = netdev_alloc_skb_ip_align(priv->ndev,
+ priv->rx_packet_max);
+ if (!skb)
+ break;
+ ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
+ skb_tailroom(skb), 0, GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ break;
+ }
+ /* continue even if we didn't manage to submit all
+ * receive descs
+ */
+ cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
}
- /* continue even if we didn't manage to submit all receive descs */
- cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
napi_enable(&priv->napi);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ if (priv->data.dual_emac)
+ priv->slaves[priv->emac_port].open_stat = true;
return 0;
}
@@ -704,12 +862,17 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_stop_queue(priv->ndev);
napi_disable(&priv->napi);
netif_carrier_off(priv->ndev);
- cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
- cpdma_ctlr_stop(priv->dma);
- cpsw_ale_stop(priv->ale);
+
+ if (cpsw_common_res_usage_state(priv) <= 1) {
+ cpsw_intr_disable(priv);
+ cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpdma_ctlr_stop(priv->dma);
+ cpsw_ale_stop(priv->ale);
+ }
for_each_slave(priv, cpsw_slave_stop, priv);
pm_runtime_put_sync(&priv->pdev->dev);
+ if (priv->data.dual_emac)
+ priv->slaves[priv->emac_port].open_stat = false;
return 0;
}
@@ -727,13 +890,13 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->cpts->tx_enable)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_tx_timestamp(skb);
- ret = cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, GFP_KERNEL);
+ ret = cpsw_tx_packet_submit(ndev, priv, skb);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
goto fail;
@@ -782,7 +945,7 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
u32 ts_en, seq_id;
- if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
+ if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -790,10 +953,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (priv->cpts.tx_enable)
+ if (priv->cpts->tx_enable)
ts_en |= CPSW_V1_TS_TX_EN;
- if (priv->cpts.rx_enable)
+ if (priv->cpts->rx_enable)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -802,16 +965,21 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
- struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ struct cpsw_slave *slave;
u32 ctrl, mtype;
+ if (priv->data.dual_emac)
+ slave = &priv->slaves[priv->emac_port];
+ else
+ slave = &priv->slaves[priv->data.cpts_active_slave];
+
ctrl = slave_read(slave, CPSW2_CONTROL);
ctrl &= ~CTRL_ALL_TS_MASK;
- if (priv->cpts.tx_enable)
+ if (priv->cpts->tx_enable)
ctrl |= CTRL_TX_TS_BITS;
- if (priv->cpts.rx_enable)
+ if (priv->cpts->rx_enable)
ctrl |= CTRL_RX_TS_BITS;
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -824,7 +992,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
- struct cpts *cpts = &priv->cpts;
+ struct cpts *cpts = priv->cpts;
struct hwtstamp_config cfg;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -910,7 +1078,9 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpdma_chan_start(priv->txch);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
}
static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
@@ -929,10 +1099,79 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
cpsw_interrupt(ndev->irq, priv);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
}
#endif
+static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+ unsigned short vid)
+{
+ int ret;
+
+ ret = cpsw_ale_add_vlan(priv->ale, vid,
+ ALE_ALL_PORTS << priv->host_port,
+ 0, ALE_ALL_PORTS << priv->host_port,
+ (ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+ if (ret != 0)
+ goto clean_vid;
+
+ ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ ALE_ALL_PORTS << priv->host_port,
+ ALE_VLAN, vid, 0);
+ if (ret != 0)
+ goto clean_vlan_ucast;
+ return 0;
+
+clean_vlan_ucast:
+ cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+clean_vid:
+ cpsw_ale_del_vlan(priv->ale, vid, 0);
+ return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ unsigned short vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ if (vid == priv->data.default_vlan)
+ return 0;
+
+ dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ return cpsw_add_vlan_ale_entry(priv, vid);
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ unsigned short vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (vid == priv->data.default_vlan)
+ return 0;
+
+ dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+ if (ret != 0)
+ return ret;
+
+ return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -947,6 +1186,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cpsw_ndo_poll_controller,
#endif
+ .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -984,7 +1225,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = priv->cpts.phc_index;
+ info->phc_index = priv->cpts->phc_index;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -1021,6 +1262,7 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
slave->data = data;
slave->regs = regs + slave_reg_ofs;
slave->sliver = regs + sliver_reg_ofs;
+ slave->port_vlan = data->dual_emac_res_vlan;
}
static int cpsw_probe_dt(struct cpsw_platform_data *data,
@@ -1101,6 +1343,9 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->mac_control = prop;
+ if (!of_property_read_u32(node, "dual_emac", &prop))
+ data->dual_emac = prop;
+
/*
* Populate all the child nodes here...
*/
@@ -1134,6 +1379,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (mac_addr)
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+ if (data->dual_emac) {
+ if (of_property_read_u32(node, "dual_emac_res_vlan",
+ &prop)) {
+ pr_err("Missing dual_emac_res_vlan in DT.\n");
+ slave_data->dual_emac_res_vlan = i+1;
+ pr_err("Using %d as Reserved VLAN for %d slave\n",
+ slave_data->dual_emac_res_vlan, i);
+ } else {
+ slave_data->dual_emac_res_vlan = prop;
+ }
+ }
+
i++;
}
@@ -1144,6 +1401,79 @@ error_ret:
return ret;
}
+static int cpsw_probe_dual_emac(struct platform_device *pdev,
+ struct cpsw_priv *priv)
+{
+ struct cpsw_platform_data *data = &priv->data;
+ struct net_device *ndev;
+ struct cpsw_priv *priv_sl2;
+ int ret = 0, i;
+
+ ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+ if (!ndev) {
+ pr_err("cpsw: error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ priv_sl2 = netdev_priv(ndev);
+ spin_lock_init(&priv_sl2->lock);
+ priv_sl2->data = *data;
+ priv_sl2->pdev = pdev;
+ priv_sl2->ndev = ndev;
+ priv_sl2->dev = &ndev->dev;
+ priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv_sl2->rx_packet_max = max(rx_packet_max, 128);
+
+ if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
+ memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
+ ETH_ALEN);
+ pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+ } else {
+ random_ether_addr(priv_sl2->mac_addr);
+ pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+ }
+ memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
+
+ priv_sl2->slaves = priv->slaves;
+ priv_sl2->clk = priv->clk;
+
+ priv_sl2->cpsw_res = priv->cpsw_res;
+ priv_sl2->regs = priv->regs;
+ priv_sl2->host_port = priv->host_port;
+ priv_sl2->host_port_regs = priv->host_port_regs;
+ priv_sl2->wr_regs = priv->wr_regs;
+ priv_sl2->dma = priv->dma;
+ priv_sl2->txch = priv->txch;
+ priv_sl2->rxch = priv->rxch;
+ priv_sl2->ale = priv->ale;
+ priv_sl2->emac_port = 1;
+ priv->slaves[1].ndev = ndev;
+ priv_sl2->cpts = priv->cpts;
+ priv_sl2->version = priv->version;
+
+ for (i = 0; i < priv->num_irqs; i++) {
+ priv_sl2->irqs_table[i] = priv->irqs_table[i];
+ priv_sl2->num_irqs = priv->num_irqs;
+ }
+
+ ndev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ret = register_netdev(ndev);
+ if (ret) {
+ pr_err("cpsw: error registering net device\n");
+ free_netdev(ndev);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
static int cpsw_probe(struct platform_device *pdev)
{
struct cpsw_platform_data *data = pdev->dev.platform_data;
@@ -1170,6 +1500,11 @@ static int cpsw_probe(struct platform_device *pdev)
priv->dev = &ndev->dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
+ priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
+ if (!priv->cpts) {
+ pr_err("error allocating cpts\n");
+ ret = -ENOMEM;
+ goto clean_ndev_ret;
+ }
/*
* This may be required here for child devices.
@@ -1202,6 +1537,9 @@ static int cpsw_probe(struct platform_device *pdev)
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
+ priv->slaves[0].ndev = ndev;
+ priv->emac_port = 0;
+
priv->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "fck is not found\n");
@@ -1256,7 +1594,7 @@ static int cpsw_probe(struct platform_device *pdev)
switch (priv->version) {
case CPSW_VERSION_1:
priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -1267,7 +1605,7 @@ static int cpsw_probe(struct platform_device *pdev)
break;
case CPSW_VERSION_2:
priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
@@ -1354,7 +1692,7 @@ static int cpsw_probe(struct platform_device *pdev)
k++;
}
- ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */
+ ndev->features |= NETIF_F_HW_VLAN_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@@ -1369,13 +1707,21 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_irq_ret;
}
- if (cpts_register(&pdev->dev, &priv->cpts,
+ if (cpts_register(&pdev->dev, priv->cpts,
data->cpts_clock_mult, data->cpts_clock_shift))
dev_err(priv->dev, "error registering cpts device\n");
cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
priv->cpsw_res->start, ndev->irq);
+ if (priv->data.dual_emac) {
+ ret = cpsw_probe_dual_emac(pdev, priv);
+ if (ret) {
+ cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
+ goto clean_irq_ret;
+ }
+ }
+
return 0;
clean_irq_ret:
@@ -1414,7 +1760,7 @@ static int cpsw_remove(struct platform_device *pdev)
pr_info("removing device");
platform_set_drvdata(pdev, NULL);
- cpts_unregister(&priv->cpts);
+ cpts_unregister(priv->cpts);
free_irq(ndev->irq, priv);
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
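Note: a hedged sketch, not part of the patch, of how the dual_emac_res_vlan read from the device tree could be turned into ALE entries with the helpers this series extends. The function name and the slave-to-port mapping (slave_num + 1) are illustrative assumptions; only the helper signatures and cpsw_priv/cpsw_slave fields shown in this diff are relied on.

	static int example_reserve_port_vlan(struct cpsw_priv *priv,
					     struct cpsw_slave *slave)
	{
		u16 vid = slave->port_vlan;	/* dual_emac_res_vlan from DT */
		int mask = BIT(slave->slave_num + 1) | BIT(priv->host_port);
		int ret;

		/* membership limited to this slave and the host port,
		 * egress untagged, no unregistered multicast flooding */
		ret = cpsw_ale_add_vlan(priv->ale, vid, mask, mask, mask, 0);
		if (ret)
			return ret;

		/* host unicast and broadcast stay confined to that VLAN */
		ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
					 priv->host_port, ALE_VLAN, vid);
		if (ret)
			return ret;

		return cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
					  mask, ALE_VLAN, vid, 0);
	}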
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 0e9ccc2cf91..7fa60d6092e 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -148,7 +148,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
return idx;
}
-static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
+int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int type, idx;
@@ -160,6 +160,8 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
type = cpsw_ale_get_entry_type(ale_entry);
if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
continue;
+ if (cpsw_ale_get_vlan_id(ale_entry) != vid)
+ continue;
cpsw_ale_get_addr(ale_entry, entry_addr);
if (memcmp(entry_addr, addr, 6) == 0)
return idx;
@@ -167,6 +169,22 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
return -ENOENT;
}
+int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+ if (cpsw_ale_get_vlan_id(ale_entry) == vid)
+ return idx;
+ }
+ return -ENOENT;
+}
+
static int cpsw_ale_match_free(struct cpsw_ale *ale)
{
u32 ale_entry[ALE_ENTRY_WORDS];
@@ -274,19 +292,32 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
return 0;
}
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
+static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
+ int flags, u16 vid)
+{
+ if (flags & ALE_VLAN) {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR);
+ cpsw_ale_set_vlan_id(ale_entry, vid);
+ } else {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ }
+}
+
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
cpsw_ale_set_addr(ale_entry, addr);
cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
cpsw_ale_set_port_num(ale_entry, port);
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
if (idx < 0)
@@ -298,12 +329,13 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
return 0;
}
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
return -ENOENT;
@@ -313,18 +345,19 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
}
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
- int super, int mcast_state)
+ int flags, u16 vid, int mcast_state)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx, mask;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx >= 0)
cpsw_ale_read(ale, idx, ale_entry);
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
cpsw_ale_set_addr(ale_entry, addr);
- cpsw_ale_set_super(ale_entry, super);
+ cpsw_ale_set_super(ale_entry, (flags & ALE_SUPER) ? 1 : 0);
cpsw_ale_set_mcast_state(ale_entry, mcast_state);
mask = cpsw_ale_get_port_mask(ale_entry);
@@ -342,12 +375,13 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
return 0;
}
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
return -EINVAL;
@@ -362,6 +396,55 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
return 0;
}
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+ int reg_mcast, int unreg_mcast)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
+ cpsw_ale_set_vlan_id(ale_entry, vid);
+
+ cpsw_ale_set_vlan_untag_force(ale_entry, untag);
+ cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast);
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
+ cpsw_ale_set_vlan_member_list(ale_entry, port);
+
+ if (idx < 0)
+ idx = cpsw_ale_match_free(ale);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(ale);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ if (port_mask)
+ cpsw_ale_set_vlan_member_list(ale_entry, port_mask);
+ else
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
struct ale_control_info {
const char *name;
int offset, port_offset;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 2bd09cbce52..30daa1265f0 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -64,8 +64,14 @@ enum cpsw_ale_port_state {
};
/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
-#define ALE_SECURE 1
-#define ALE_BLOCKED 2
+#define ALE_SECURE BIT(0)
+#define ALE_BLOCKED BIT(1)
+#define ALE_SUPER BIT(2)
+#define ALE_VLAN BIT(3)
+
+#define ALE_PORT_HOST BIT(0)
+#define ALE_PORT_1 BIT(1)
+#define ALE_PORT_2 BIT(2)
#define ALE_MCAST_FWD 0
#define ALE_MCAST_BLOCK_LEARN_FWD 1
@@ -81,11 +87,17 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid);
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid);
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
- int super, int mcast_state);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask);
+ int flags, u16 vid, int mcast_state);
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ int flags, u16 vid);
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+ int reg_mcast, int unreg_mcast);
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
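With the flags/vid parameters added above, callers change as illustrated below (sketch only; ale, mac, host_port and vid stand for whatever the caller already has in scope):

	/* non-VLAN entry: converted callers simply pass 0 for flags and vid */
	cpsw_ale_add_ucast(ale, mac, host_port, 0, 0);

	/* VLAN-qualified entries OR in ALE_VLAN and supply the vid */
	cpsw_ale_add_ucast(ale, mac, host_port, ALE_VLAN, vid);
	cpsw_ale_del_ucast(ale, mac, host_port, ALE_VLAN, vid);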
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index f8629186afb..68c3418160b 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -60,6 +60,9 @@
#define CPDMA_DESC_EOQ BIT(28)
#define CPDMA_DESC_TD_COMPLETE BIT(27)
#define CPDMA_DESC_PASS_CRC BIT(26)
+#define CPDMA_DESC_TO_PORT_EN BIT(20)
+#define CPDMA_TO_PORT_SHIFT 16
+#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16))
#define CPDMA_TEARDOWN_VALUE 0xfffffffc
@@ -132,6 +135,14 @@ struct cpdma_chan {
#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
+#define cpdma_desc_to_port(chan, mode, directed) \
+ do { \
+ if (!is_rx_chan(chan) && ((directed == 1) || \
+ (directed == 2))) \
+ mode |= (CPDMA_DESC_TO_PORT_EN | \
+ (directed << CPDMA_TO_PORT_SHIFT)); \
+ } while (0)
+
/*
* Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
* emac) have dedicated on-chip memory for these descriptors. Some other
@@ -449,10 +460,8 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
if (ctlr->state != CPDMA_STATE_IDLE)
cpdma_ctlr_stop(ctlr);
- for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
- if (ctlr->channels[i])
- cpdma_chan_destroy(ctlr->channels[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+ cpdma_chan_destroy(ctlr->channels[i]);
cpdma_desc_pool_destroy(ctlr->pool);
spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -484,9 +493,9 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
return 0;
}
-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
- dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
@@ -662,7 +671,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, gfp_t gfp_mask)
+ int len, int directed, gfp_t gfp_mask)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc __iomem *desc;
@@ -692,6 +701,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+ cpdma_desc_to_port(chan, mode, directed);
desc_write(desc, hw_next, 0);
desc_write(desc, hw_buffer, buffer);
@@ -782,7 +792,8 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
status = -EBUSY;
goto unlock_ret;
}
- status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
+ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
+ CPDMA_DESC_PORT_MASK);
chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
chan_write(chan, cp, desc_dma);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 8d2aeb2096a..d9bcc6032fd 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -24,6 +24,13 @@
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan) __chan_linear((chan)->chan_num)
+#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7)
+
+#define CPDMA_EOI_RX_THRESH 0x0
+#define CPDMA_EOI_RX 0x1
+#define CPDMA_EOI_TX 0x2
+#define CPDMA_EOI_MISC 0x3
+
struct cpdma_params {
struct device *dev;
void __iomem *dmaregs;
@@ -82,11 +89,11 @@ int cpdma_chan_dump(struct cpdma_chan *chan);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, gfp_t gfp_mask);
+ int len, int directed, gfp_t gfp_mask);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
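The two API changes above (per-vector EOI and the new directed argument to submit) let cpsw acknowledge RX and TX interrupts separately and steer a TX descriptor at a specific slave port. A minimal usage sketch, assuming a cpsw-style priv carrying the dma and txch handles:

	/* acknowledge the rx and tx interrupt vectors individually */
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);

	/* queue a frame directed at slave port 1; passing 0 keeps the old
	 * behaviour of letting the switch decide the egress port */
	ret = cpdma_chan_submit(priv->txch, skb, skb->data, skb->len,
				1, GFP_KERNEL);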
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 242ec55110d..52c05366599 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1037,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status)
recycle:
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
WARN_ON(ret == -ENOMEM);
if (unlikely(ret < 0))
@@ -1092,7 +1092,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
- GFP_KERNEL);
+ 0, GFP_KERNEL);
if (unlikely(ret_code != 0)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
@@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev)
break;
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
if (WARN_ON(ret < 0))
break;
}
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ec4a5e1c6fb..185c721c52d 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1812,7 +1812,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
- dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+ dev_kfree_skb(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
@@ -2024,11 +2024,7 @@ static void rhine_slow_event_task(struct work_struct *work)
if (intr_status & IntrPCIErr)
netif_warn(rp, hw, dev, "PCI error\n");
- napi_disable(&rp->napi);
- rhine_irq_disable(rp);
- /* Slow and safe. Consider __napi_schedule as a replacement ? */
- napi_enable(&rp->napi);
- napi_schedule(&rp->napi);
+ iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
out_unlock:
mutex_unlock(&rp->task_lock);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index c2e5497397d..02de6c89167 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -586,7 +586,8 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
static int __init bpq_init_driver(void)
{
#ifdef CONFIG_PROC_FS
- if (!proc_net_fops_create(&init_net, "bpqether", S_IRUGO, &bpq_info_fops)) {
+ if (!proc_create("bpqether", S_IRUGO, init_net.proc_net,
+ &bpq_info_fops)) {
printk(KERN_ERR
"bpq: cannot create /proc/net/bpqether entry.\n");
return -ENOENT;
@@ -610,7 +611,7 @@ static void __exit bpq_cleanup_driver(void)
unregister_netdevice_notifier(&bpq_dev_notifier);
- proc_net_remove(&init_net, "bpqether");
+ remove_proc_entry("bpqether", init_net.proc_net);
rtnl_lock();
while (!list_empty(&bpq_devices)) {
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 1b4a47bd32b..bc1d5217038 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -2118,7 +2118,7 @@ static int __init scc_init_driver (void)
}
rtnl_unlock();
- proc_net_fops_create(&init_net, "z8530drv", 0, &scc_net_seq_fops);
+ proc_create("z8530drv", 0, init_net.proc_net, &scc_net_seq_fops);
return 0;
}
@@ -2173,7 +2173,7 @@ static void __exit scc_cleanup_driver(void)
if (Vector_Latch)
release_region(Vector_Latch, 1);
- proc_net_remove(&init_net, "z8530drv");
+ remove_proc_entry("z8530drv", init_net.proc_net);
}
MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index c6645f1017a..4cf8f1017aa 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1167,7 +1167,7 @@ static int __init yam_init_driver(void)
yam_timer.expires = jiffies + HZ / 100;
add_timer(&yam_timer);
- proc_net_fops_create(&init_net, "yam", S_IRUGO, &yam_info_fops);
+ proc_create("yam", S_IRUGO, init_net.proc_net, &yam_info_fops);
return 0;
error:
while (--i >= 0) {
@@ -1199,7 +1199,7 @@ static void __exit yam_cleanup_driver(void)
kfree(p);
}
- proc_net_remove(&init_net, "yam");
+ remove_proc_entry("yam", init_net.proc_net);
}
/* --------------------------------------------------------------------- */
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index a4a62e170ec..fc1687ea4a4 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -751,16 +751,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return 0;
}
-static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
-{
- return 0;
-}
-
-static int at86rf230_resume(struct spi_device *spi)
-{
- return 0;
-}
-
static int at86rf230_fill_data(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
@@ -948,8 +938,6 @@ static struct spi_driver at86rf230_driver = {
},
.probe = at86rf230_probe,
.remove = at86rf230_remove,
- .suspend = at86rf230_suspend,
- .resume = at86rf230_resume,
};
module_spi_driver(at86rf230_driver);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 7b44ebd7770..defcd8a8574 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -29,6 +29,7 @@
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
+#include <linux/hash.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
@@ -126,6 +127,21 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
return vlan->receive(skb);
}
+static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
+{
+ return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT);
+}
+
+static unsigned int mc_hash(const struct macvlan_dev *vlan,
+ const unsigned char *addr)
+{
+ u32 val = __get_unaligned_cpu32(addr + 2);
+
+ val ^= macvlan_hash_mix(vlan);
+ return hash_32(val, MACVLAN_MC_FILTER_BITS);
+}
+
static void macvlan_broadcast(struct sk_buff *skb,
const struct macvlan_port *port,
struct net_device *src,
@@ -137,6 +153,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
struct sk_buff *nskb;
unsigned int i;
int err;
+ unsigned int hash;
if (skb->protocol == htons(ETH_P_PAUSE))
return;
@@ -146,6 +163,9 @@ static void macvlan_broadcast(struct sk_buff *skb,
if (vlan->dev == src || !(vlan->mode & mode))
continue;
+ hash = mc_hash(vlan, eth->h_dest);
+ if (!test_bit(hash, vlan->mc_filter))
+ continue;
nskb = skb_clone(skb, GFP_ATOMIC);
err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE);
@@ -405,6 +425,21 @@ static void macvlan_set_mac_lists(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ);
+ } else {
+ struct netdev_hw_addr *ha;
+ DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);
+
+ bitmap_zero(filter, MACVLAN_MC_FILTER_SZ);
+ netdev_for_each_mc_addr(ha, dev) {
+ __set_bit(mc_hash(vlan, ha->addr), filter);
+ }
+
+ __set_bit(mc_hash(vlan, dev->broadcast), filter);
+
+ bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ);
+ }
dev_uc_sync(vlan->lowerdev, dev);
dev_mc_sync(vlan->lowerdev, dev);
}
@@ -564,7 +599,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int macvlan_fdb_del(struct ndmsg *ndm,
+static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr)
{
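The multicast change earlier in this file gives each macvlan a per-device hash filter so broadcast-path clones are only made for devices that actually joined the group. A short sketch of the lookup, assuming MACVLAN_MC_FILTER_BITS and mc_filter are defined alongside each other in include/linux/if_macvlan.h:

	/* bytes 2..5 of the group address, mixed with the macvlan identity,
	 * select one of 1 << MACVLAN_MC_FILTER_BITS buckets */
	u32 val = __get_unaligned_cpu32(eth->h_dest + 2) ^ macvlan_hash_mix(vlan);
	unsigned int bucket = hash_32(val, MACVLAN_MC_FILTER_BITS);

	if (!test_bit(bucket, vlan->mc_filter))
		return;		/* this macvlan never joined the group */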
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index b181dfb3d6d..97243011d31 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -543,7 +543,6 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
skb->data_len += len;
skb->len += len;
skb->truesize += truesize;
- skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
atomic_add(truesize, &skb->sk->sk_wmem_alloc);
while (len) {
int off = base & ~PAGE_MASK;
@@ -599,7 +598,7 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
- skb_shinfo(skb)->gso_type |= gso_type;
+ skb_shinfo(skb)->gso_type = gso_type;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -743,6 +742,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (zerocopy) {
skb_shinfo(skb)->destructor_arg = m->msg_control;
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
if (vlan)
macvlan_start_xmit(skb, vlan->dev);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0b2706abe3e..4fd754e74eb 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1805,8 +1805,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
if (ppp->pass_filter || ppp->active_filter) {
- if (skb_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_unclone(skb, GFP_ATOMIC))
goto err;
*skb_push(skb, 2) = 0;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 20f31d0d153..bb07ba94c3a 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1134,7 +1134,7 @@ static __net_init int pppoe_init_net(struct net *net)
rwlock_init(&pn->hash_lock);
- pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops);
+ pde = proc_create("pppoe", S_IRUGO, net->proc_net, &pppoe_seq_fops);
#ifdef CONFIG_PROC_FS
if (!pde)
return -ENOMEM;
@@ -1145,7 +1145,7 @@ static __net_init int pppoe_init_net(struct net *net)
static __net_exit void pppoe_exit_net(struct net *net)
{
- proc_net_remove(net, "pppoe");
+ remove_proc_entry("pppoe", net->proc_net);
}
static struct pernet_operations pppoe_net_ops = {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 694ccf6d71a..05c5efe8459 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -508,6 +508,7 @@ static bool team_is_mode_set(struct team *team)
static void team_set_no_mode(struct team *team)
{
+ team->user_carrier_enabled = false;
team->mode = &__team_no_mode;
}
@@ -1710,6 +1711,10 @@ static netdev_features_t team_fix_features(struct net_device *dev,
static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
+ struct team *team = netdev_priv(dev);
+
+ team->user_carrier_enabled = true;
+
if (new_carrier)
netif_carrier_on(dev);
else
@@ -2573,6 +2578,9 @@ static void __team_carrier_check(struct team *team)
struct team_port *port;
bool team_linkup;
+ if (team->user_carrier_enabled)
+ return;
+
team_linkup = false;
list_for_each_entry(port, &team->port_list, list) {
if (port->linkup) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8d208dd9296..b6f45c5d84d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
}
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
- u16 queue_index)
+ struct tun_file *tfile)
{
struct hlist_head *head;
struct tun_flow_entry *e;
unsigned long delay = tun->ageing_time;
+ u16 queue_index = tfile->queue_index;
if (!rxhash)
return;
@@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
rcu_read_lock();
- if (tun->numqueues == 1)
+ /* There is a small chance of out-of-order delivery while queues are
+ * being switched; not worth optimizing for. */
+ if (tun->numqueues == 1 || tfile->detached)
goto unlock;
e = tun_flow_find(head, rxhash);
@@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun = rtnl_dereference(tfile->tun);
- if (tun) {
+ if (tun && !tfile->detached) {
u16 index = tfile->queue_index;
BUG_ON(index >= tun->numqueues);
dev = tun->dev;
rcu_assign_pointer(tun->tfiles[index],
tun->tfiles[tun->numqueues - 1]);
- rcu_assign_pointer(tfile->tun, NULL);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
--tun->numqueues;
- if (clean)
+ if (clean) {
+ rcu_assign_pointer(tfile->tun, NULL);
sock_put(&tfile->sk);
- else
+ } else
tun_disable_queue(tun, tfile);
synchronize_net();
@@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
}
if (clean) {
- if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
- !(tun->flags & TUN_PERSIST))
- if (tun->dev->reg_state == NETREG_REGISTERED)
+ if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+ netif_carrier_off(tun->dev);
+
+ if (!(tun->flags & TUN_PERSIST) &&
+ tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
+ }
BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
&tfile->socket.flags));
@@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)
rcu_assign_pointer(tfile->tun, NULL);
--tun->numqueues;
}
+ list_for_each_entry(tfile, &tun->disabled, next) {
+ wake_up_all(&tfile->wq.wait);
+ rcu_assign_pointer(tfile->tun, NULL);
+ }
BUG_ON(tun->numqueues != 0);
synchronize_net();
@@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
goto out;
err = -EINVAL;
- if (rtnl_dereference(tfile->tun))
+ if (rtnl_dereference(tfile->tun) && !tfile->detached)
goto out;
err = -EBUSY;
@@ -1009,7 +1019,6 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
skb->data_len += len;
skb->len += len;
skb->truesize += truesize;
- skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
atomic_add(truesize, &skb->sk->sk_wmem_alloc);
while (len) {
int off = base & ~PAGE_MASK;
@@ -1155,18 +1164,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- unsigned short gso_type = 0;
-
pr_debug("GSO!\n");
switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
- gso_type = SKB_GSO_TCPV4;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
- gso_type = SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- gso_type = SKB_GSO_UDP;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
break;
default:
tun->dev->stats.rx_frame_errors++;
@@ -1175,10 +1182,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
- gso_type |= SKB_GSO_TCP_ECN;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
skb_shinfo(skb)->gso_size = gso.gso_size;
- skb_shinfo(skb)->gso_type |= gso_type;
if (skb_shinfo(skb)->gso_size == 0) {
tun->dev->stats.rx_frame_errors++;
kfree_skb(skb);
@@ -1194,6 +1200,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (zerocopy) {
skb_shinfo(skb)->destructor_arg = msg_control;
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
skb_reset_network_header(skb);
@@ -1203,7 +1210,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
tun->dev->stats.rx_packets++;
tun->dev->stats.rx_bytes += len;
- tun_flow_update(tun, rxhash, tfile->queue_index);
+ tun_flow_update(tun, rxhash, tfile);
return total_len;
}
@@ -1662,10 +1669,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
device_create_file(&tun->dev->dev, &dev_attr_group))
pr_err("Failed to create tun sysfs files\n");
-
- netif_carrier_on(tun->dev);
}
+ netif_carrier_on(tun->dev);
+
tun_debug(KERN_INFO, tun, "tun_set_iff\n");
if (ifr->ifr_flags & IFF_NO_PI)
@@ -1817,7 +1824,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = tun_attach(tun, file);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
- if (!tun || !(tun->flags & TUN_TAP_MQ))
+ if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
ret = -EINVAL;
else
__tun_detach(tfile, false);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index b5ad7eadc4f..4a8c25a2229 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -576,9 +576,14 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
if ((intf->num_altsetting == 2) &&
!usb_set_interface(dev->udev,
intf->cur_altsetting->desc.bInterfaceNumber,
- CDC_NCM_COMM_ALTSETTING_MBIM) &&
- cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
- return -ENODEV;
+ CDC_NCM_COMM_ALTSETTING_MBIM)) {
+ if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ return -ENODEV;
+ else
+ usb_set_interface(dev->udev,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ CDC_NCM_COMM_ALTSETTING_NCM);
+ }
#endif
/* NCM data altsetting is always 1 */
@@ -1215,6 +1220,9 @@ static const struct usb_device_id cdc_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
.driver_info = (unsigned long)&wwan_info,
},
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+ .driver_info = (unsigned long)&wwan_info,
+ },
/* Infineon(now Intel) HSPA Modem platform */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 8ee5ab0db12..73051d10ead 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -149,11 +149,9 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
DECLARE_WAITQUEUE(wait, current);
buffer = kmalloc(size, GFP_KERNEL);
- if (!buffer) {
- netif_warn(pegasus, drv, pegasus->net,
- "out of memory in %s\n", __func__);
+ if (!buffer)
return -ENOMEM;
- }
+
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
while (pegasus->flags & ETH_REGS_CHANGED)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 575a5839ee3..19d903598b0 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
/* 2. Combined interface devices matching on class+protocol */
{ /* Huawei E367 and possibly others in "Windows mode" */
@@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+ { /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* Pantech UML290, P4200 and more */
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
.driver_info = (unsigned long)&qmi_wwan_info,
@@ -399,6 +411,7 @@ static const struct usb_device_id products[] = {
},
/* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
@@ -461,6 +474,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 30c1b330e98..51f3192f393 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
unsigned long lockflags;
size_t size = dev->rx_urb_size;
+ /* prevent rx skb allocation when error ratio is high */
+ if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+ usb_free_urb(urb);
+ return -ENOLINK;
+ }
+
skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
if (!skb) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
break;
}
+ /* stop rx if packet error rate is high */
+ if (++dev->pkt_cnt > 30) {
+ dev->pkt_cnt = 0;
+ dev->pkt_err = 0;
+ } else {
+ if (state == rx_cleanup)
+ dev->pkt_err++;
+ if (dev->pkt_err > 20)
+ set_bit(EVENT_RX_KILL, &dev->flags);
+ }
+
state = defer_bh(dev, skb, &dev->rxq, state);
if (urb) {
@@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
(dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
"simple");
+ /* reset rx error state */
+ dev->pkt_cnt = 0;
+ dev->pkt_err = 0;
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
// delay posting reads until we're fully open
tasklet_schedule (&dev->bh);
if (info->manage_power) {
@@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
if (info->tx_fixup) {
skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
if (!skb) {
- if (netif_msg_tx_err(dev)) {
- netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
- goto drop;
- } else {
- /* cdc_ncm collected packet; waits for more */
+ /* packet collected; minidriver waiting for more */
+ if (info->flags & FLAG_MULTI_PACKET)
goto not_drop;
- }
+ netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+ goto drop;
}
}
length = skb->len;
@@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
}
}
+ /* restart RX again after disabling due to high error rate */
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
// waiting for all pending urbs to complete?
if (dev->wait) {
if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e1da42aaf9d..07a4af0aa3d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -426,12 +426,13 @@ static void veth_dellink(struct net_device *dev, struct list_head *head)
* not being freed before one RCU grace period.
*/
RCU_INIT_POINTER(priv->peer, NULL);
-
- priv = netdev_priv(peer);
- RCU_INIT_POINTER(priv->peer, NULL);
-
unregister_netdevice_queue(dev, head);
- unregister_netdevice_queue(peer, head);
+
+ if (peer) {
+ priv = netdev_priv(peer);
+ RCU_INIT_POINTER(priv->peer, NULL);
+ unregister_netdevice_queue(peer, head);
+ }
}
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 381a2d8d8a8..192c91c8e79 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -227,7 +227,7 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
skb->len += size;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
- skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
*len -= size;
}
@@ -387,18 +387,16 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
ntohs(skb->protocol), skb->len, skb->pkt_type);
if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- unsigned short gso_type = 0;
-
pr_debug("GSO!\n");
switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
- gso_type = SKB_GSO_TCPV4;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- gso_type = SKB_GSO_UDP;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
- gso_type = SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
default:
net_warn_ratelimited("%s: bad gso type %u.\n",
@@ -407,7 +405,7 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
}
if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
- gso_type |= SKB_GSO_TCP_ECN;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
if (skb_shinfo(skb)->gso_size == 0) {
@@ -415,7 +413,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
goto frame_err;
}
- skb_shinfo(skb)->gso_type |= gso_type;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b1c90f8ccd3..ffb97b2a15a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -150,8 +150,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
if (ret & 1) { /* Link is up. */
netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
adapter->link_speed);
- if (!netif_carrier_ok(adapter->netdev))
- netif_carrier_on(adapter->netdev);
+ netif_carrier_on(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -160,8 +159,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
}
} else {
netdev_info(adapter->netdev, "NIC Link is Down\n");
- if (netif_carrier_ok(adapter->netdev))
- netif_carrier_off(adapter->netdev);
+ netif_carrier_off(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3060,6 +3058,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
+ netif_carrier_off(netdev);
err = register_netdev(netdev);
if (err) {
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 72485b9b900..9d70421cf3a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -393,7 +393,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
}
/* Delete entry (via netlink) */
-static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
const unsigned char *addr)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 8077e6e3593..0b602951ff6 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -1346,7 +1346,6 @@ EXPORT_SYMBOL(i2400m_unknown_barker);
int i2400m_rx_setup(struct i2400m *i2400m)
{
int result = 0;
- struct device *dev = i2400m_dev(i2400m);
i2400m->rx_reorder = i2400m_rx_reorder_disabled? 0 : 1;
if (i2400m->rx_reorder) {
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index ab363f34b4d..a78afa98c65 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1613,6 +1613,10 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
+ if (WARN_ON(ee_mode < 0)) {
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+ return;
+ }
/* completed NF calibration, test threshold */
nf = ath5k_hw_read_measured_noise_floor(ah);
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 4084b107628..e2d8b2cf19e 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -985,6 +985,8 @@ ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
return;
ee_mode = ath5k_eeprom_mode_from_channel(channel);
+ if (WARN_ON(ee_mode < 0))
+ return;
/* Adjust power delta for channel 14 */
if (channel->center_freq == 2484)
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 4225cca0f19..752ffc4f416 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -427,6 +427,30 @@ static bool ath6kl_is_tx_pending(struct ath6kl *ar)
return ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0;
}
+static void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif,
+ bool enable)
+{
+ int err;
+
+ if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
+ return;
+
+ if (vif->nw_type != INFRA_NETWORK)
+ return;
+
+ if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
+ vif->ar->fw_capabilities))
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
+ enable ? "enable" : "disable");
+
+ err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, enable);
+ if (err)
+ ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
+ enable ? "enable" : "disable", err);
+}
static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
@@ -616,13 +640,13 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
vif->req_bssid, vif->ch_hint,
ar->connect_ctrl_flags, nw_subtype);
- /* disable background scan if period is 0 */
- if (sme->bg_scan_period == 0)
+ if (sme->bg_scan_period == 0) {
+ /* disable background scan if period is 0 */
sme->bg_scan_period = 0xffff;
-
- /* configure default value if not specified */
- if (sme->bg_scan_period == -1)
+ } else if (sme->bg_scan_period == -1) {
+ /* configure default value if not specified */
sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD;
+ }
ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0,
sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
@@ -767,7 +791,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
nw_type & ADHOC_CREATOR ? "creator" : "joiner");
cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(ar->wiphy, bss);
return;
}
@@ -778,7 +802,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len,
WLAN_STATUS_SUCCESS, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(ar->wiphy, bss);
} else if (vif->sme_state == SME_CONNECTED) {
/* inform roam event to cfg80211 */
cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len,
@@ -1454,10 +1478,10 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return -EIO;
if (pmgmt) {
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
mode.pwr_mode = REC_POWER;
} else {
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
mode.pwr_mode = MAX_PERF_POWER;
}
@@ -1509,7 +1533,7 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
list_del(&vif->list);
spin_unlock_bh(&ar->list_lock);
- ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+ ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
ath6kl_cfg80211_vif_cleanup(vif);
@@ -1559,17 +1583,13 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
set_iface_type:
switch (type) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
vif->next_mode = INFRA_NETWORK;
break;
case NL80211_IFTYPE_ADHOC:
vif->next_mode = ADHOC_NETWORK;
break;
case NL80211_IFTYPE_AP:
- vif->next_mode = AP_NETWORK;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- vif->next_mode = INFRA_NETWORK;
- break;
case NL80211_IFTYPE_P2P_GO:
vif->next_mode = AP_NETWORK;
break;
@@ -1778,14 +1798,14 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->target_stats.rx_byte) {
sinfo->rx_bytes = vif->target_stats.rx_byte;
- sinfo->filled |= STATION_INFO_RX_BYTES;
+ sinfo->filled |= STATION_INFO_RX_BYTES64;
sinfo->rx_packets = vif->target_stats.rx_pkt;
sinfo->filled |= STATION_INFO_RX_PACKETS;
}
if (vif->target_stats.tx_byte) {
sinfo->tx_bytes = vif->target_stats.tx_byte;
- sinfo->filled |= STATION_INFO_TX_BYTES;
+ sinfo->filled |= STATION_INFO_TX_BYTES64;
sinfo->tx_packets = vif->target_stats.tx_pkt;
sinfo->filled |= STATION_INFO_TX_PACKETS;
}
@@ -2673,30 +2693,6 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
return 0;
}
-void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable)
-{
- int err;
-
- if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
- return;
-
- if (vif->nw_type != INFRA_NETWORK)
- return;
-
- if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
- vif->ar->fw_capabilities))
- return;
-
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
- enable ? "enable" : "disable");
-
- err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
- vif->fw_vif_idx, enable);
- if (err)
- ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
- enable ? "enable" : "disable", err);
-}
-
static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
u8 *rsn_capab)
{
@@ -2776,9 +2772,11 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
ar->ap_mode_bkey.valid = false;
- /* TODO:
- * info->interval
- */
+ ret = ath6kl_wmi_ap_set_beacon_intvl_cmd(ar->wmi, vif->fw_vif_idx,
+ info->beacon_interval);
+
+ if (ret)
+ ath6kl_warn("Failed to set beacon interval: %d\n", ret);
ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx,
info->dtim_period);
@@ -3557,6 +3555,37 @@ static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
return 0;
}
+void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready)
+{
+ static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ bool discon_issued;
+
+ netif_stop_queue(vif->ndev);
+
+ clear_bit(WLAN_ENABLED, &vif->flags);
+
+ if (wmi_ready) {
+ discon_issued = test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags);
+ ath6kl_disconnect(vif);
+ del_timer(&vif->disconnect_timer);
+
+ if (discon_issued)
+ ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+ (vif->nw_type & AP_NETWORK) ?
+ bcast_mac : vif->bssid,
+ 0, NULL, 0);
+ }
+
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
+ }
+
+ /* need to clean up enhanced bmiss detection fw state */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
+}
+
void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
{
struct ath6kl *ar = vif->ar;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index e5e70f3a8ca..b59becd91ae 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -61,7 +61,5 @@ void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
struct ath6kl *ath6kl_cfg80211_create(void);
void ath6kl_cfg80211_destroy(struct ath6kl *ar);
-/* TODO: remove this once ath6kl_vif_cleanup() is moved to cfg80211.c */
-void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable);
#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 189d8faf8c8..61b2f98b4e7 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -940,7 +940,7 @@ void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
bool wait_fot_compltn, bool cold_reset);
void ath6kl_init_control_info(struct ath6kl_vif *vif);
struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
-void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
+void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready);
int ath6kl_init_hw_start(struct ath6kl *ar);
int ath6kl_init_hw_stop(struct ath6kl *ar);
int ath6kl_init_fetch_firmwares(struct ath6kl *ar);
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index ba6bd497b78..281390178e3 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -509,9 +509,7 @@ static void destroy_htc_txctrl_packet(struct htc_packet *packet)
{
struct sk_buff *skb;
skb = packet->skb;
- if (skb != NULL)
- dev_kfree_skb(skb);
-
+ dev_kfree_skb(skb);
kfree(packet);
}
@@ -969,6 +967,22 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
u16 payload_len;
int status = 0;
+ /*
+ * ar->htc_target can be NULL due to a race condition that can occur
+ * during driver initialization (we do 'ath6kl_hif_power_on' before
+ * initializing 'ar->htc_target' via 'ath6kl_htc_create').
+ * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as
+ * usb_complete_t/callback function for 'usb_fill_bulk_urb'.
+ * Hence ar->htc_target may still be NULL when this completion path
+ * runs via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
+ */
+ if (WARN_ON_ONCE(!target)) {
+ ath6kl_err("Target not yet initialized\n");
+ status = -EINVAL;
+ goto free_skb;
+ }
+
netdata = skb->data;
netlen = skb->len;
@@ -1054,6 +1068,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
dev_kfree_skb(skb);
skb = NULL;
+
goto free_skb;
}
@@ -1089,8 +1104,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
skb = NULL;
free_skb:
- if (skb != NULL)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
return status;
@@ -1184,7 +1198,7 @@ static void reset_endpoint_states(struct htc_target *target)
INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
INIT_LIST_HEAD(&ep->rx_bufq);
ep->target = target;
- ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
+ ep->pipe.tx_credit_flow_enabled = true;
}
}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index f21fa322e5c..5d434cf88f3 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1715,38 +1715,6 @@ void ath6kl_init_hw_restart(struct ath6kl *ar)
}
}
-/* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */
-void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
-{
- static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- bool discon_issued;
-
- netif_stop_queue(vif->ndev);
-
- clear_bit(WLAN_ENABLED, &vif->flags);
-
- if (wmi_ready) {
- discon_issued = test_bit(CONNECTED, &vif->flags) ||
- test_bit(CONNECT_PEND, &vif->flags);
- ath6kl_disconnect(vif);
- del_timer(&vif->disconnect_timer);
-
- if (discon_issued)
- ath6kl_disconnect_event(vif, DISCONNECT_CMD,
- (vif->nw_type & AP_NETWORK) ?
- bcast_mac : vif->bssid,
- 0, NULL, 0);
- }
-
- if (vif->scan_req) {
- cfg80211_scan_done(vif->scan_req, true);
- vif->scan_req = NULL;
- }
-
- /* need to clean up enhanced bmiss detection fw state */
- ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
-}
-
void ath6kl_stop_txrx(struct ath6kl *ar)
{
struct ath6kl_vif *vif, *tmp_vif;
@@ -1766,7 +1734,7 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
list_del(&vif->list);
spin_unlock_bh(&ar->list_lock);
- ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+ ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
rtnl_lock();
ath6kl_cfg80211_vif_cleanup(vif);
rtnl_unlock();
@@ -1801,8 +1769,6 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
"attempting to reset target on instance destroy\n");
ath6kl_reset_device(ar, ar->target_type, true, true);
- clear_bit(WLAN_ENABLED, &ar->flag);
-
up(&ar->sem);
}
EXPORT_SYMBOL(ath6kl_stop_txrx);
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 62bcc0d5bc2..5fcd342762d 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -159,10 +159,8 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context)
{
- if (urb_context->skb != NULL) {
- dev_kfree_skb(urb_context->skb);
- urb_context->skb = NULL;
- }
+ dev_kfree_skb(urb_context->skb);
+ urb_context->skb = NULL;
ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 998f8b0f62f..d76b5bd81a0 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -751,6 +751,23 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
NO_SYNC_WMIFLAG);
}
+int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
+ u32 beacon_intvl)
+{
+ struct sk_buff *skb;
+ struct set_beacon_int_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct set_beacon_int_cmd *) skb->data;
+
+ cmd->beacon_intvl = cpu_to_le32(beacon_intvl);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_BEACON_INT_CMDID, NO_SYNC_WMIFLAG);
+}
+
int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period)
{
struct sk_buff *skb;
@@ -1108,7 +1125,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
kfree(mgmt);
if (bss == NULL)
return -ENOMEM;
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(ar->wiphy, bss);
/*
* Firmware doesn't return any event when scheduled scan has
@@ -2480,16 +2497,11 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
free_cmd_skb:
/* free up any resources left over (possibly due to an error) */
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
free_data_skb:
- for (index = 0; index < num_pri_streams; index++) {
- if (data_sync_bufs[index].skb != NULL) {
- dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].
- skb);
- }
- }
+ for (index = 0; index < num_pri_streams; index++)
+ dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].skb);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 98b1755e67f..b5f226503ba 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -1660,6 +1660,10 @@ struct roam_ctrl_cmd {
u8 roam_ctrl;
} __packed;
+struct set_beacon_int_cmd {
+ __le32 beacon_intvl;
+} __packed;
+
struct set_dtim_cmd {
__le32 dtim_period;
} __packed;
@@ -2649,6 +2653,8 @@ int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
+int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
+ u32 beacon_interval);
int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7647ed6b73d..17507dc8a1e 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -58,6 +58,7 @@ config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
depends on ATH9K
select MAC80211_DEBUGFS
+ select RELAY
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b2d6c18d167..a56b2416e2f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -319,6 +319,8 @@ struct ath_rx {
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
struct sk_buff *frag;
+
+ u32 ampdu_ref;
};
int ath_startrecv(struct ath_softc *sc);
@@ -387,6 +389,7 @@ struct ath_beacon_config {
u16 bmiss_timeout;
u8 dtim_count;
bool enable_beacon;
+ bool ibss_creator;
};
struct ath_beacon {
@@ -754,6 +757,7 @@ struct ath_softc {
/* relay(fs) channel for spectral scan */
struct rchan *rfs_chan_spec_scan;
enum spectral_mode spectral_mode;
+ struct ath_spec_scan spec_config;
int scanning;
#ifdef CONFIG_PM_SLEEP
@@ -863,31 +867,31 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
* interface.
*/
enum ath_fft_sample_type {
- ATH_FFT_SAMPLE_HT20 = 0,
+ ATH_FFT_SAMPLE_HT20 = 1,
};
struct fft_sample_tlv {
u8 type; /* see ath_fft_sample */
- u16 length;
+ __be16 length;
/* type dependent data follows */
} __packed;
struct fft_sample_ht20 {
struct fft_sample_tlv tlv;
- u8 __alignment;
+ u8 max_exp;
- u16 freq;
+ __be16 freq;
s8 rssi;
s8 noise;
- u16 max_magnitude;
+ __be16 max_magnitude;
u8 max_index;
u8 bitmap_weight;
- u64 tsf;
+ __be64 tsf;
- u16 data[SPECTRAL_HT20_NUM_BINS];
+ u8 data[SPECTRAL_HT20_NUM_BINS];
} __packed;
void ath9k_tasklet(unsigned long data);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index dd3771954bd..5f05c26d1ec 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -407,12 +407,17 @@ void ath9k_beacon_tasklet(unsigned long data)
}
}
-static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt, u32 intval)
+/*
+ * Both nexttbtt and intval have to be in usecs.
+ */
+static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
+ u32 intval, bool reset_tsf)
{
struct ath_hw *ah = sc->sc_ah;
ath9k_hw_disable_interrupts(ah);
- ath9k_hw_reset_tsf(ah);
+ if (reset_tsf)
+ ath9k_hw_reset_tsf(ah);
ath9k_beaconq_config(sc);
ath9k_hw_beaconinit(ah, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
@@ -442,10 +447,12 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
else
ah->imask &= ~ATH9K_INT_SWBA;
- ath_dbg(common, BEACON, "AP nexttbtt: %u intval: %u conf_intval: %u\n",
+ ath_dbg(common, BEACON,
+ "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval);
+ ath9k_beacon_init(sc, nexttbtt, intval, true);
}
/*
@@ -586,17 +593,45 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
ath9k_reset_beacon_status(sc);
intval = TU_TO_USEC(conf->beacon_interval);
- nexttbtt = intval;
+
+ if (conf->ibss_creator) {
+ nexttbtt = intval;
+ } else {
+ u32 tbtt, offset, tsftu;
+ u64 tsf;
+
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * sync'd TSF.
+ */
+ tsf = ath9k_hw_gettsf64(ah);
+ tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+ offset = tsftu % conf->beacon_interval;
+ tbtt = tsftu - offset;
+ if (offset)
+ tbtt += conf->beacon_interval;
+
+ nexttbtt = TU_TO_USEC(tbtt);
+ }
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
else
ah->imask &= ~ATH9K_INT_SWBA;
- ath_dbg(common, BEACON, "IBSS nexttbtt: %u intval: %u conf_intval: %u\n",
+ ath_dbg(common, BEACON,
+ "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval);
+ ath9k_beacon_init(sc, nexttbtt, intval, conf->ibss_creator);
+
+ /*
+ * Set the global 'beacon has been configured' flag for the
+ * joiner case in IBSS mode.
+ */
+ if (!conf->ibss_creator && conf->enable_beacon)
+ set_bit(SC_OP_BEACONS, &sc->sc_flags);
}
bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
@@ -639,6 +674,7 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
cur_conf->dtim_period = bss_conf->dtim_period;
cur_conf->listen_interval = 1;
cur_conf->dtim_count = 1;
+ cur_conf->ibss_creator = bss_conf->ibss_creator;
cur_conf->bmiss_timeout =
ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
@@ -666,34 +702,59 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
{
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ unsigned long flags;
+ bool skip_beacon = false;
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_cache_beacon_config(sc, bss_conf);
ath9k_set_beacon(sc);
set_bit(SC_OP_BEACONS, &sc->sc_flags);
- } else {
- /*
- * Take care of multiple interfaces when
- * enabling/disabling SWBA.
- */
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
- if (!bss_conf->enable_beacon &&
- (sc->nbcnvifs <= 1)) {
- cur_conf->enable_beacon = false;
- } else if (bss_conf->enable_beacon) {
- cur_conf->enable_beacon = true;
- ath9k_cache_beacon_config(sc, bss_conf);
- }
+ return;
+ }
+
+ /*
+ * Take care of multiple interfaces when
+ * enabling/disabling SWBA.
+ */
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon &&
+ (sc->nbcnvifs <= 1)) {
+ cur_conf->enable_beacon = false;
+ } else if (bss_conf->enable_beacon) {
+ cur_conf->enable_beacon = true;
+ ath9k_cache_beacon_config(sc, bss_conf);
}
+ }
- if (cur_conf->beacon_interval) {
+ /*
+ * Configure the HW beacon registers only when we have a valid
+ * beacon interval.
+ */
+ if (cur_conf->beacon_interval) {
+ /*
+ * If we are joining an existing IBSS network, start beaconing
+ * only after a TSF-sync has taken place. Ensure that this
+ * happens by setting the appropriate flags.
+ */
+ if ((changed & BSS_CHANGED_IBSS) && !bss_conf->ibss_creator &&
+ bss_conf->enable_beacon) {
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ skip_beacon = true;
+ } else {
ath9k_set_beacon(sc);
-
- if (cur_conf->enable_beacon)
- set_bit(SC_OP_BEACONS, &sc->sc_flags);
- else
- clear_bit(SC_OP_BEACONS, &sc->sc_flags);
}
+
+ /*
+ * Do not set the SC_OP_BEACONS flag for IBSS joiner mode
+ * here; it is set in ath9k_beacon_config_adhoc().
+ */
+ if (cur_conf->enable_beacon && !skip_beacon)
+ set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ else
+ clear_bit(SC_OP_BEACONS, &sc->sc_flags);
}
}
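
For reference, the joiner-case nexttbtt computation added above rounds the synchronized TSF up to the next beacon-interval boundary before programming the beacon timers. A minimal standalone sketch of that arithmetic follows; the TU_TO_USEC/TSF_TO_TU macros and the FUDGE slack are assumptions matching the usual ath9k definitions (1 TU = 1024 us, FUDGE = 2 TU) and are not part of this patch.

#include <stdint.h>

/* Assumed to match the conventional ath9k definitions. */
#define TU_TO_USEC(_tu)   ((uint32_t)(_tu) << 10)               /* 1 TU = 1024 us */
#define TSF_TO_TU(_h, _l) ((((uint32_t)(_h)) << 22) | (((uint32_t)(_l)) >> 10))
#define FUDGE             2                                     /* slack, in TU */

/* Hypothetical helper mirroring the IBSS joiner case above. */
static uint32_t ibss_joiner_nexttbtt_usec(uint64_t tsf, uint32_t beacon_interval_tu)
{
	uint32_t tsftu, offset, tbtt;

	/* Current TSF in TU, plus a little slack. */
	tsftu = TSF_TO_TU(tsf >> 32, (uint32_t)tsf) + FUDGE;

	/* Round up to the next multiple of the beacon interval. */
	offset = tsftu % beacon_interval_tu;
	tbtt = tsftu - offset;
	if (offset)
		tbtt += beacon_interval_tu;

	return TU_TO_USEC(tbtt);	/* the hardware is programmed in usecs */
}
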
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 6c5d313ebcb..3714b971d18 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -895,6 +895,7 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("RX-Bytes-All", rx_bytes_all);
RXS_ERR("RX-Beacons", rx_beacons);
RXS_ERR("RX-Frags", rx_frags);
+ RXS_ERR("RX-Spectral", rx_spectral);
if (len > size)
len = size;
@@ -1035,6 +1036,182 @@ static const struct file_operations fops_spec_scan_ctl = {
.llseek = default_llseek,
};
+static ssize_t read_file_spectral_short_repeat(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_short_repeat(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ sc->spec_config.short_repeat = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_short_repeat = {
+ .read = read_file_spectral_short_repeat,
+ .write = write_file_spectral_short_repeat,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ sc->spec_config.count = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+ .read = read_file_spectral_count,
+ .write = write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ sc->spec_config.period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_period = {
+ .read = read_file_spectral_period,
+ .write = write_file_spectral_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_fft_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_fft_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 15)
+ return -EINVAL;
+
+ sc->spec_config.fft_period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_fft_period = {
+ .read = read_file_spectral_fft_period,
+ .write = write_file_spectral_fft_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent,
umode_t mode,
@@ -1059,11 +1236,13 @@ static int remove_buf_file_handler(struct dentry *dentry)
void ath_debug_send_fft_sample(struct ath_softc *sc,
struct fft_sample_tlv *fft_sample_tlv)
{
+ int length;
+
if (!sc->rfs_chan_spec_scan)
return;
- relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv,
- fft_sample_tlv->length + sizeof(*fft_sample_tlv));
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
}
static struct rchan_callbacks rfs_spec_scan_cb = {
@@ -1893,6 +2072,16 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc,
&fops_spec_scan_ctl);
+ debugfs_create_file("spectral_short_repeat", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_short_repeat);
+ debugfs_create_file("spectral_count", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_spectral_count);
+ debugfs_create_file("spectral_period", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_spectral_period);
+ debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_fft_period);
#ifdef CONFIG_ATH9K_MAC_DEBUG
debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index a22c0d78070..410d6d8f1aa 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -219,6 +219,7 @@ struct ath_tx_stats {
* @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
* @rx_beacons: No. of beacons received.
* @rx_frags: No. of rx-fragements received.
+ * @rx_spectral: No. of spectral packets received.
*/
struct ath_rx_stats {
u32 rx_pkts_all;
@@ -237,6 +238,7 @@ struct ath_rx_stats {
u32 rx_too_many_frags_err;
u32 rx_beacons;
u32 rx_frags;
+ u32 rx_spectral;
};
struct ath_stats {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index b6a5a08810b..3ad1fd05c5e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1196,20 +1196,17 @@ void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
int ath9k_rx_init(struct ath9k_htc_priv *priv)
{
- struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_rxbuf *rxbuf;
int i = 0;
INIT_LIST_HEAD(&priv->rx.rxbuf);
spin_lock_init(&priv->rx.rxbuflock);
for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
- rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
- if (rxbuf == NULL) {
- ath_err(common, "Unable to allocate RX buffers\n");
+ struct ath9k_htc_rxbuf *rxbuf =
+ kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
+ if (rxbuf == NULL)
goto err;
- }
+
list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 42cf3c7f1e2..2a2ae403e0e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2981,13 +2981,8 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
struct ath_gen_timer *timer;
timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
-
- if (timer == NULL) {
- ath_err(ath9k_hw_common(ah),
- "Failed to allocate memory for hw timer[%d]\n",
- timer_index);
+ if (timer == NULL)
return NULL;
- }
/* allocate a hardware generic timer slot */
timer_table->timers[timer_index] = timer;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 4b1abc7da98..af932c9444d 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -497,6 +497,13 @@ static void ath9k_init_misc(struct ath_softc *sc)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
+
+ sc->spec_config.enabled = 0;
+ sc->spec_config.short_repeat = true;
+ sc->spec_config.count = 8;
+ sc->spec_config.endless = false;
+ sc->spec_config.period = 0xFF;
+ sc->spec_config.fft_period = 0xF;
}
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
@@ -915,7 +922,7 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
ath9k_eeprom_release(sc);
- if (sc->rfs_chan_spec_scan) {
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
relay_close(sc->rfs_chan_spec_scan);
sc->rfs_chan_spec_scan = NULL;
}
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index b42be910a83..811007ec07a 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -605,13 +605,13 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
* reported, then decryption and MIC errors are irrelevant,
* the frame is going to be dropped either way
*/
- if (ads.ds_rxstatus8 & AR_CRCErr)
- rs->rs_status |= ATH9K_RXERR_CRC;
- else if (ads.ds_rxstatus8 & AR_PHYErr) {
+ if (ads.ds_rxstatus8 & AR_PHYErr) {
rs->rs_status |= ATH9K_RXERR_PHY;
phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
rs->rs_phyerr = phyerr;
- } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
+ } else if (ads.ds_rxstatus8 & AR_CRCErr)
+ rs->rs_status |= ATH9K_RXERR_CRC;
+ else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 4b72b660f18..6e66f9c6782 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -320,28 +320,25 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
struct ath_node *an;
- u8 density;
an = (struct ath_node *)sta->drv_priv;
an->sc = sc;
an->sta = sta;
an->vif = vif;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- ath_tx_node_init(sc, an);
+ ath_tx_node_init(sc, an);
+
+ if (sta->ht_cap.ht_supported) {
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
- density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
- an->mpdudensity = density;
+ an->mpdudensity = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
}
}
static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- ath_tx_node_cleanup(sc, an);
+ ath_tx_node_cleanup(sc, an);
}
void ath9k_tasklet(unsigned long data)
@@ -1099,45 +1096,34 @@ int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_spec_scan param;
if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
ath_err(common, "spectrum analyzer not implemented on this hardware\n");
return -1;
}
- /* NOTE: this will generate a few samples ...
- *
- * TODO: review default parameters, and/or define an interface to set
- * them.
- */
- param.enabled = 1;
- param.short_repeat = true;
- param.count = 8;
- param.endless = false;
- param.period = 0xFF;
- param.fft_period = 0xF;
-
switch (spectral_mode) {
case SPECTRAL_DISABLED:
- param.enabled = 0;
+ sc->spec_config.enabled = 0;
break;
case SPECTRAL_BACKGROUND:
/* send endless samples.
* TODO: is this really useful for "background"?
*/
- param.endless = 1;
+ sc->spec_config.endless = 1;
+ sc->spec_config.enabled = 1;
break;
case SPECTRAL_CHANSCAN:
- break;
case SPECTRAL_MANUAL:
+ sc->spec_config.endless = 0;
+ sc->spec_config.enabled = 1;
break;
default:
return -1;
}
ath9k_ps_wakeup(sc);
- ath9k_hw_ops(ah)->spectral_scan_config(ah, &param);
+ ath9k_hw_ops(ah)->spectral_scan_config(ah, &sc->spec_config);
ath9k_ps_restore(sc);
sc->spectral_mode = spectral_mode;
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index d2074334ec9..815bee21c19 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -474,8 +474,6 @@ void ath_mci_cleanup(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
- struct ath_mci_coex *mci = &sc->mci_coex;
- struct ath_mci_buf *buf = &mci->sched_buf;
ar9003_mci_cleanup(ah);
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 714558d1ba7..96ac433ba7f 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1204,7 +1204,7 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
else if (sta->ht_cap.mcs.rx_mask[1])
caps |= WLAN_RC_DS_FLAG;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
caps |= WLAN_RC_40_FLAG;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
caps |= WLAN_RC_SGI_FLAG;
@@ -1452,17 +1452,7 @@ static void ath_rate_free(void *priv)
static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *rate_priv;
-
- rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp);
- if (!rate_priv) {
- ath_err(ath9k_hw_common(sc->sc_ah),
- "Unable to allocate private rc structure\n");
- return NULL;
- }
-
- return rate_priv;
+ return kzalloc(sizeof(struct ath_rate_priv), gfp);
}
static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index d7c129bb571..ee156e54314 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -533,7 +533,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (sc->ps_flags & PS_BEACON_SYNC) {
sc->ps_flags &= ~PS_BEACON_SYNC;
ath_dbg(common, PS,
- "Reconfigure Beacon timers based on timestamp from the AP\n");
+ "Reconfigure beacon timers based on synchronized timestamp\n");
ath9k_set_beacon(sc);
}
@@ -1016,18 +1016,20 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
rxs->flag &= ~RX_FLAG_DECRYPTED;
}
+#ifdef CONFIG_ATH9K_DEBUGFS
static s8 fix_rssi_inv_only(u8 rssi_val)
{
if (rssi_val == 128)
rssi_val = 0;
return (s8) rssi_val;
}
+#endif
-
-static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
- struct ath_rx_status *rs, u64 tsf)
+/* returns 1 if this was a spectral frame, even if not handled. */
+static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf)
{
-#ifdef CONFIG_ATH_DEBUG
+#ifdef CONFIG_ATH9K_DEBUGFS
struct ath_hw *ah = sc->sc_ah;
u8 bins[SPECTRAL_HT20_NUM_BINS];
u8 *vdata = (u8 *)hdr;
@@ -1035,7 +1037,8 @@ static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
struct ath_radar_info *radar_info;
struct ath_ht20_mag_info *mag_info;
int len = rs->rs_datalen;
- int i, dc_pos;
+ int dc_pos;
+ u16 length, max_magnitude;
/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
* via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -1044,7 +1047,14 @@ static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
- return;
+ return 0;
+
+ /* check if spectral scan bit is set. This does not have to be checked
+ * if received through a SPECTRAL phy error, but shouldn't hurt.
+ */
+ radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
+ if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
+ return 0;
/* Variation in the data length is possible and will be fixed later.
* Note that we only support HT20 for now.
@@ -1053,19 +1063,13 @@ static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
*/
if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
(len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
- return;
-
- /* check if spectral scan bit is set. This does not have to be checked
- * if received through a SPECTRAL phy error, but shouldn't hurt.
- */
- radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
- if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
- return;
+ return 1;
fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
- fft_sample.tlv.length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
+ length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
+ fft_sample.tlv.length = __cpu_to_be16(length);
- fft_sample.freq = ah->curchan->chan->center_freq;
+ fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
fft_sample.noise = ah->noise;
@@ -1093,7 +1097,7 @@ static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
break;
default:
- return;
+ return 1;
}
/* DC value (value in the middle) is the blind spot of the spectral
@@ -1105,19 +1109,41 @@ static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
/* mag data is at the end of the frame, in front of radar_info */
mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
- /* Apply exponent and grab further auxiliary information. */
- for (i = 0; i < SPECTRAL_HT20_NUM_BINS; i++)
- fft_sample.data[i] = bins[i] << mag_info->max_exp;
+ /* copy raw bins without scaling them */
+ memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
+ fft_sample.max_exp = mag_info->max_exp & 0xf;
- fft_sample.max_magnitude = spectral_max_magnitude(mag_info->all_bins);
+ max_magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
fft_sample.max_index = spectral_max_index(mag_info->all_bins);
fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
- fft_sample.tsf = tsf;
+ fft_sample.tsf = __cpu_to_be64(tsf);
ath_debug_send_fft_sample(sc, &fft_sample.tlv);
+ return 1;
+#else
+ return 0;
#endif
}
+static void ath9k_apply_ampdu_details(struct ath_softc *sc,
+ struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
+{
+ if (rs->rs_isaggr) {
+ rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+
+ rxs->ampdu_reference = sc->rx.ampdu_ref;
+
+ if (!rs->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
+ sc->rx.ampdu_ref++;
+ }
+
+ if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
+ rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
+ }
+}
+
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
struct ath_buf *bf;
@@ -1202,8 +1228,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
rxs->mactime += 0x100000000ULL;
- if ((rs.rs_status & ATH9K_RXERR_PHY))
- ath_process_fft(sc, hdr, &rs, rxs->mactime);
+ if (rs.rs_status & ATH9K_RXERR_PHY) {
+ if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
+ RX_STAT_INC(rx_spectral);
+ goto requeue_drop_frag;
+ }
+ }
retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
rxs, &decrypt_error);
@@ -1320,6 +1350,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
ath_ant_comb_scan(sc, &rs);
+ ath9k_apply_ampdu_details(sc, &rs, rxs);
+
ieee80211_rx(hw, skb);
requeue_drop_frag:
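
Since the receive path above now exports the raw FFT bins plus max_exp instead of pre-shifted values (the removed "bins[i] << mag_info->max_exp" loop), applying the exponent becomes the consumer's job. A short sketch of that reconstruction, assuming SPECTRAL_HT20_NUM_BINS is 56 as in ath9k.h:

#include <stdint.h>

#define SPECTRAL_HT20_NUM_BINS 56	/* assumed to match ath9k.h */

/* Re-apply the exponent to the raw bins of one HT20 sample. */
static void scale_ht20_bins(const uint8_t *raw, uint8_t max_exp, uint32_t *out)
{
	int i;

	for (i = 0; i < SPECTRAL_HT20_NUM_BINS; i++)
		out[i] = (uint32_t)raw[i] << (max_exp & 0xf);
}
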
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index feacaafee95..89a64411b82 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1233,7 +1233,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
* in HT IBSS when a beacon with HT-info is received after the station
* has already been added.
*/
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (sta->ht_cap.ht_supported) {
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
@@ -1904,8 +1904,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_buf *bf;
u8 tidno;
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
- ieee80211_is_data_qos(hdr->frame_control)) {
+ if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
IEEE80211_QOS_CTL_TID_MASK;
tid = ATH_AN_2_TID(txctl->an, tidno);
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index ef82751722e..f293b3ff475 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1853,7 +1853,7 @@ void *carl9170_alloc(size_t priv_size)
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
IEEE80211_HW_SIGNAL_DBM;
if (!modparam_noht) {
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 116f4e807ae..9ecc1968262 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -204,7 +204,6 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
break;
default:
return -EOPNOTSUPP;
-
}
/* FW don't support scan after connection attempt */
@@ -228,8 +227,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
}
/* 0-based channel indexes */
cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
- wil_dbg(wil, "Scan for ch %d : %d MHz\n", ch,
- request->channels[i]->center_freq);
+ wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch,
+ request->channels[i]->center_freq);
}
return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
@@ -342,7 +341,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
}
out:
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
return rc;
}
@@ -425,8 +424,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
return -EINVAL;
}
- wil_dbg(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
- channel->center_freq, info->privacy ? "secure" : "open");
+ wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+ channel->center_freq, info->privacy ? "secure" : "open");
print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
info->ssid, info->ssid_len);
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 38049da7104..dc97e7b2609 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -38,7 +38,9 @@
#define WIL6210_IMC_RX BIT_DMA_EP_RX_ICR_RX_DONE
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
-#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | ISR_MISC_MBOX_EVT)
+#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \
+ ISR_MISC_MBOX_EVT | \
+ ISR_MISC_FW_ERROR)
#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
BIT_DMA_PSEUDO_CAUSE_TX | \
@@ -50,7 +52,6 @@
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
-
}
#else /* defined(CONFIG_WIL6210_ISR_COR) */
/* configure to Write-1-to-Clear mode */
@@ -94,7 +95,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
{
- wil_dbg_IRQ(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "%s()\n", __func__);
iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
@@ -125,7 +126,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
{
- wil_dbg_IRQ(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "%s()\n", __func__);
set_bit(wil_status_irqen, &wil->status);
@@ -135,7 +136,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
void wil6210_disable_irq(struct wil6210_priv *wil)
{
- wil_dbg_IRQ(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "%s()\n", __func__);
wil6210_mask_irq_tx(wil);
wil6210_mask_irq_rx(wil);
@@ -145,7 +146,7 @@ void wil6210_disable_irq(struct wil6210_priv *wil)
void wil6210_enable_irq(struct wil6210_priv *wil)
{
- wil_dbg_IRQ(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "%s()\n", __func__);
iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICC));
@@ -167,7 +168,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
- wil_dbg_IRQ(wil, "ISR RX 0x%08x\n", isr);
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
if (!isr) {
wil_err(wil, "spurious IRQ: RX\n");
@@ -177,7 +178,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
wil6210_mask_irq_rx(wil);
if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
- wil_dbg_IRQ(wil, "RX done\n");
+ wil_dbg_irq(wil, "RX done\n");
isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
wil_rx_handle(wil);
}
@@ -197,7 +198,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
- wil_dbg_IRQ(wil, "ISR TX 0x%08x\n", isr);
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
if (!isr) {
wil_err(wil, "spurious IRQ: TX\n");
@@ -208,13 +209,13 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
uint i;
- wil_dbg_IRQ(wil, "TX done\n");
+ wil_dbg_irq(wil, "TX done\n");
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
for (i = 0; i < 24; i++) {
u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i);
if (isr & mask) {
isr &= ~mask;
- wil_dbg_IRQ(wil, "TX done(%i)\n", i);
+ wil_dbg_irq(wil, "TX done(%i)\n", i);
wil_tx_complete(wil, i);
}
}
@@ -228,6 +229,17 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
return IRQ_HANDLED;
}
+static void wil_notify_fw_error(struct wil6210_priv *wil)
+{
+ struct device *dev = &wil_to_ndev(wil)->dev;
+ char *envp[3] = {
+ [0] = "SOURCE=wil6210",
+ [1] = "EVENT=FW_ERROR",
+ [2] = NULL,
+ };
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+}
+
static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -235,7 +247,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
- wil_dbg_IRQ(wil, "ISR MISC 0x%08x\n", isr);
+ wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
if (!isr) {
wil_err(wil, "spurious IRQ: MISC\n");
@@ -244,8 +256,15 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
wil6210_mask_irq_misc(wil);
+ if (isr & ISR_MISC_FW_ERROR) {
+ wil_dbg_irq(wil, "IRQ: Firmware error\n");
+ clear_bit(wil_status_fwready, &wil->status);
+ wil_notify_fw_error(wil);
+ isr &= ~ISR_MISC_FW_ERROR;
+ }
+
if (isr & ISR_MISC_FW_READY) {
- wil_dbg_IRQ(wil, "IRQ: FW ready\n");
+ wil_dbg_irq(wil, "IRQ: FW ready\n");
/**
* Actual FW ready indicated by the
* WMI_FW_READY_EVENTID
@@ -268,10 +287,10 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
struct wil6210_priv *wil = cookie;
u32 isr = wil->isr_misc;
- wil_dbg_IRQ(wil, "Thread ISR MISC 0x%08x\n", isr);
+ wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
if (isr & ISR_MISC_MBOX_EVT) {
- wil_dbg_IRQ(wil, "MBOX event\n");
+ wil_dbg_irq(wil, "MBOX event\n");
wmi_recv_cmd(wil);
isr &= ~ISR_MISC_MBOX_EVT;
}
@@ -293,7 +312,7 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- wil_dbg_IRQ(wil, "Thread IRQ\n");
+ wil_dbg_irq(wil, "Thread IRQ\n");
/* Discover real IRQ cause */
if (wil->isr_misc)
wil6210_irq_misc_thread(irq, cookie);
@@ -370,6 +389,8 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
if (wil6210_debug_irq_mask(wil, pseudo_cause))
return IRQ_NONE;
+ wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);
+
wil6210_mask_irq_pseudo(wil);
/* Discover real IRQ cause
@@ -401,8 +422,6 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
if (rc != IRQ_WAKE_THREAD)
wil6210_unmask_irq_pseudo(wil);
- wil_dbg_IRQ(wil, "Hard IRQ 0x%08x\n", pseudo_cause);
-
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 95fcd361322..761c389586d 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -64,7 +64,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev;
- wil_dbg(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "%s()\n", __func__);
wil_link_off(wil);
clear_bit(wil_status_fwconnected, &wil->status);
@@ -80,11 +80,13 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
GFP_KERNEL);
break;
default:
- ;
+ break;
}
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
wil_vring_fini_tx(wil, i);
+
+ clear_bit(wil_status_dontscan, &wil->status);
}
static void wil_disconnect_worker(struct work_struct *work)
@@ -99,7 +101,7 @@ static void wil_connect_timer_fn(ulong x)
{
struct wil6210_priv *wil = (void *)x;
- wil_dbg(wil, "Connect timeout\n");
+ wil_dbg_misc(wil, "Connect timeout\n");
/* reschedule to thread context - disconnect won't
* run from atomic context
@@ -107,9 +109,18 @@ static void wil_connect_timer_fn(ulong x)
schedule_work(&wil->disconnect_worker);
}
+static void wil_cache_mbox_regs(struct wil6210_priv *wil)
+{
+ /* make shadow copy of registers that should not change at run time */
+ wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+ sizeof(struct wil6210_mbox_ctl));
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+}
+
int wil_priv_init(struct wil6210_priv *wil)
{
- wil_dbg(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "%s()\n", __func__);
mutex_init(&wil->mutex);
mutex_init(&wil->wmi_mutex);
@@ -136,11 +147,7 @@ int wil_priv_init(struct wil6210_priv *wil)
return -EAGAIN;
}
- /* make shadow copy of registers that should not change on run time */
- wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
- sizeof(struct wil6210_mbox_ctl));
- wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
- wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+ wil_cache_mbox_regs(wil);
return 0;
}
@@ -162,7 +169,7 @@ void wil_priv_deinit(struct wil6210_priv *wil)
static void wil_target_reset(struct wil6210_priv *wil)
{
- wil_dbg(wil, "Resetting...\n");
+ wil_dbg_misc(wil, "Resetting...\n");
/* register write */
#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
@@ -202,7 +209,7 @@ static void wil_target_reset(struct wil6210_priv *wil)
msleep(2000);
- wil_dbg(wil, "Reset completed\n");
+ wil_dbg_misc(wil, "Reset completed\n");
#undef W
#undef S
@@ -225,8 +232,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
wil_err(wil, "Firmware not ready\n");
return -ETIME;
} else {
- wil_dbg(wil, "FW ready after %d ms\n",
- jiffies_to_msecs(to-left));
+ wil_dbg_misc(wil, "FW ready after %d ms\n",
+ jiffies_to_msecs(to-left));
}
return 0;
}
@@ -243,13 +250,13 @@ int wil_reset(struct wil6210_priv *wil)
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL);
+ wil6210_disable_irq(wil);
+ wil->status = 0;
+
wmi_event_flush(wil);
- flush_workqueue(wil->wmi_wq);
flush_workqueue(wil->wmi_wq_conn);
-
- wil6210_disable_irq(wil);
- wil->status = 0;
+ flush_workqueue(wil->wmi_wq);
/* TODO: put MAC in reset */
wil_target_reset(wil);
@@ -258,11 +265,7 @@ int wil_reset(struct wil6210_priv *wil)
wil->pending_connect_cid = -1;
INIT_COMPLETION(wil->wmi_ready);
- /* make shadow copy of registers that should not change on run time */
- wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
- sizeof(struct wil6210_mbox_ctl));
- wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
- wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+ wil_cache_mbox_regs(wil);
/* TODO: release MAC reset */
wil6210_enable_irq(wil);
@@ -278,7 +281,7 @@ void wil_link_on(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "%s()\n", __func__);
netif_carrier_on(ndev);
netif_tx_wake_all_queues(ndev);
@@ -288,7 +291,7 @@ void wil_link_off(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "%s()\n", __func__);
netif_tx_stop_all_queues(ndev);
netif_carrier_off(ndev);
@@ -311,27 +314,27 @@ static int __wil_up(struct wil6210_priv *wil)
wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
- wil_dbg(wil, "type: STATION\n");
+ wil_dbg_misc(wil, "type: STATION\n");
bi = 0;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_AP:
- wil_dbg(wil, "type: AP\n");
+ wil_dbg_misc(wil, "type: AP\n");
bi = 100;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- wil_dbg(wil, "type: P2P_CLIENT\n");
+ wil_dbg_misc(wil, "type: P2P_CLIENT\n");
bi = 0;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_P2P_GO:
- wil_dbg(wil, "type: P2P_GO\n");
+ wil_dbg_misc(wil, "type: P2P_GO\n");
bi = 100;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_MONITOR:
- wil_dbg(wil, "type: Monitor\n");
+ wil_dbg_misc(wil, "type: Monitor\n");
bi = 0;
ndev->type = ARPHRD_IEEE80211_RADIOTAP;
/* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
@@ -354,7 +357,7 @@ static int __wil_up(struct wil6210_priv *wil)
wmi_set_channel(wil, channel->hw_value);
break;
default:
- ;
+ break;
}
/* MAC address - pre-requisite for other commands */
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 3068b5cb53a..8ce2e33dce2 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -35,37 +35,12 @@ static int wil_stop(struct net_device *ndev)
return wil_down(wil);
}
-/*
- * AC to queue mapping
- *
- * AC_VO -> queue 3
- * AC_VI -> queue 2
- * AC_BE -> queue 1
- * AC_BK -> queue 0
- */
-static u16 wil_select_queue(struct net_device *ndev, struct sk_buff *skb)
-{
- static const u16 wil_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
- struct wil6210_priv *wil = ndev_to_wil(ndev);
- u16 rc;
-
- skb->priority = cfg80211_classify8021d(skb);
-
- rc = wil_1d_to_queue[skb->priority];
-
- wil_dbg_TXRX(wil, "%s() %d -> %d\n", __func__, (int)skb->priority,
- (int)rc);
-
- return rc;
-}
-
static const struct net_device_ops wil_netdev_ops = {
.ndo_open = wil_open,
.ndo_stop = wil_stop,
.ndo_start_xmit = wil_start_xmit,
- .ndo_select_queue = wil_select_queue,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
};
void *wil_if_alloc(struct device *dev, void __iomem *csr)
@@ -97,7 +72,7 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
- ndev = alloc_netdev_mqs(0, "wlan%d", ether_setup, WIL6210_TX_QUEUES, 1);
+ ndev = alloc_netdev(0, "wlan%d", ether_setup);
if (!ndev) {
dev_err(dev, "alloc_netdev_mqs failed\n");
rc = -ENOMEM;
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 0fc83edd6ba..81c35c6e383 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -53,7 +53,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
}
wil->n_msi = use_msi;
if (wil->n_msi) {
- wil_dbg(wil, "Setup %d MSI interrupts\n", use_msi);
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
rc = pci_enable_msi_block(pdev, wil->n_msi);
if (rc && (wil->n_msi == 3)) {
wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
@@ -65,7 +65,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
wil->n_msi = 0;
}
} else {
- wil_dbg(wil, "MSI interrupts disabled, use INTx\n");
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
}
rc = wil6210_init_irq(wil, pdev->irq);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index f29c294413c..d1315b44237 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -74,8 +74,6 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
vring->swtail = 0;
vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
if (!vring->ctx) {
- wil_err(wil, "vring_alloc [%d] failed to alloc ctx mem\n",
- vring->size);
vring->va = NULL;
return -ENOMEM;
}
@@ -100,8 +98,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
d->dma.status = TX_DMA_STATUS_DU;
}
- wil_dbg(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
- vring->va, (unsigned long long)vring->pa, vring->ctx);
+ wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
+ vring->va, (unsigned long long)vring->pa, vring->ctx);
return 0;
}
@@ -353,8 +351,8 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
wil_rx_add_radiotap_header(wil, skb, d);
- wil_dbg_TXRX(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
- wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_NONE, 32, 4,
+ wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
wil_vring_advance_head(vring, 1);
@@ -369,7 +367,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
*/
ftype = wil_rxdesc_ftype(d) << 2;
if (ftype != IEEE80211_FTYPE_DATA) {
- wil_dbg_TXRX(wil, "Non-data frame ftype 0x%08x\n", ftype);
+ wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
/* TODO: process it */
kfree_skb(skb);
return NULL;
@@ -430,6 +428,8 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
int rc;
unsigned int len = skb->len;
+ skb_orphan(skb);
+
if (in_interrupt())
rc = netif_rx(skb);
else
@@ -459,13 +459,11 @@ void wil_rx_handle(struct wil6210_priv *wil)
wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
return;
}
- wil_dbg_TXRX(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "%s()\n", __func__);
while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
- wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
- skb_orphan(skb);
-
if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
skb->dev = ndev;
skb_reset_mac_header(skb);
@@ -484,53 +482,18 @@ void wil_rx_handle(struct wil6210_priv *wil)
int wil_rx_init(struct wil6210_priv *wil)
{
- struct net_device *ndev = wil_to_ndev(wil);
- struct wireless_dev *wdev = wil->wdev;
struct vring *vring = &wil->vring_rx;
int rc;
- struct wmi_cfg_rx_chain_cmd cmd = {
- .action = WMI_RX_CHAIN_ADD,
- .rx_sw_ring = {
- .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
- },
- .mid = 0, /* TODO - what is it? */
- .decap_trans_type = WMI_DECAP_TYPE_802_3,
- };
- struct {
- struct wil6210_mbox_hdr_wmi wmi;
- struct wmi_cfg_rx_chain_done_event evt;
- } __packed evt;
vring->size = WIL6210_RX_RING_SIZE;
rc = wil_vring_alloc(wil, vring);
if (rc)
return rc;
- cmd.rx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
- cmd.rx_sw_ring.ring_size = cpu_to_le16(vring->size);
- if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
- struct ieee80211_channel *ch = wdev->preset_chandef.chan;
-
- cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
- if (ch)
- cmd.sniffer_cfg.channel = ch->hw_value - 1;
- cmd.sniffer_cfg.phy_info_mode =
- cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
- cmd.sniffer_cfg.phy_support =
- cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
- ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
- }
- /* typical time for secure PCP is 840ms */
- rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
- WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+ rc = wmi_rx_chain_add(wil, vring);
if (rc)
goto err_free;
- vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
-
- wil_dbg(wil, "Rx init: status %d tail 0x%08x\n",
- le32_to_cpu(evt.evt.status), vring->hwtail);
-
rc = wil_rx_refill(wil, vring->size);
if (rc)
goto err_free;
@@ -546,25 +509,8 @@ void wil_rx_fini(struct wil6210_priv *wil)
{
struct vring *vring = &wil->vring_rx;
- if (vring->va) {
- int rc;
- struct wmi_cfg_rx_chain_cmd cmd = {
- .action = cpu_to_le32(WMI_RX_CHAIN_DEL),
- .rx_sw_ring = {
- .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
- },
- };
- struct {
- struct wil6210_mbox_hdr_wmi wmi;
- struct wmi_cfg_rx_chain_done_event cfg;
- } __packed wmi_rx_cfg_reply;
-
- rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
- WMI_CFG_RX_CHAIN_DONE_EVENTID,
- &wmi_rx_cfg_reply, sizeof(wmi_rx_cfg_reply),
- 100);
+ if (vring->va)
wil_vring_free(wil, vring, 0);
- }
}
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
@@ -617,6 +563,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) {
wil_err(wil, "Tx config failed, status 0x%02x\n",
reply.cmd.status);
+ rc = -EINVAL;
goto out_free;
}
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
@@ -689,7 +636,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
uint i = swhead;
dma_addr_t pa;
- wil_dbg_TXRX(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "%s()\n", __func__);
if (avail < vring->size/8)
netif_tx_stop_all_queues(wil_to_ndev(wil));
@@ -706,9 +653,9 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
pa = dma_map_single(dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- wil_dbg_TXRX(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
+ wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
skb->data, (unsigned long long)pa);
- wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
+ wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
if (unlikely(dma_mapping_error(dev, pa)))
@@ -737,12 +684,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
- wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_NONE, 32, 4,
+ wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
/* advance swhead */
wil_vring_advance_head(vring, nr_frags + 1);
- wil_dbg_TXRX(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+ wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
/* hold reference to skb
* to prevent skb release before accounting
@@ -775,7 +722,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct vring *vring;
int rc;
- wil_dbg_TXRX(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "%s()\n", __func__);
if (!test_bit(wil_status_fwready, &wil->status)) {
wil_err(wil, "FW not ready\n");
goto drop;
@@ -802,15 +749,13 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
switch (rc) {
case 0:
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ /* statistics will be updated on the tx_complete */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
case -ENOMEM:
return NETDEV_TX_BUSY;
default:
- ; /* goto drop; */
- break;
+ break; /* goto drop; */
}
drop:
netif_tx_stop_all_queues(ndev);
@@ -827,6 +772,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
*/
void wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
+ struct net_device *ndev = wil_to_ndev(wil);
struct device *dev = wil_to_dev(wil);
struct vring *vring = &wil->vring_tx[ringid];
@@ -835,7 +781,7 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid)
return;
}
- wil_dbg_TXRX(wil, "%s(%d)\n", __func__, ringid);
+ wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
while (!wil_vring_is_empty(vring)) {
volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx;
@@ -844,16 +790,23 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid)
if (!(d->dma.status & TX_DMA_STATUS_DU))
break;
- wil_dbg_TXRX(wil,
+ wil_dbg_txrx(wil,
"Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
vring->swtail, d->dma.length, d->dma.status,
d->dma.error);
- wil_hex_dump_TXRX("TxC ", DUMP_PREFIX_NONE, 32, 4,
+ wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
skb = vring->ctx[vring->swtail];
if (skb) {
+ if (d->dma.error == 0) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ } else {
+ ndev->stats.tx_errors++;
+ }
+
dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
vring->ctx[vring->swtail] = NULL;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 9bcfffa4006..aea961ff8f0 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -36,8 +36,6 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MEM_SIZE (2*1024*1024UL)
-#define WIL6210_TX_QUEUES (4)
-
#define WIL6210_RX_RING_SIZE (128)
#define WIL6210_TX_RING_SIZE (128)
#define WIL6210_MAX_TX_RINGS (24)
@@ -101,8 +99,7 @@ struct RGF_ICR {
#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
#define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
#define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
- #define BIT_DMA_EP_MISC_ICR_FW_INT0 BIT(28)
- #define BIT_DMA_EP_MISC_ICR_FW_INT1 BIT(29)
+ #define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */
/* Interrupt moderation control */
#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
@@ -121,8 +118,9 @@ struct RGF_ICR {
#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
/* ISR register bits */
-#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT0
-#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT1
+#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT(0)
+#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1)
+#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3)
/* Hardware definitions end */
@@ -272,17 +270,18 @@ struct wil6210_priv {
#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg)
#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg)
-#define wil_dbg_IRQ(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
-#define wil_dbg_TXRX(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
-#define wil_dbg_WMI(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+#define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
+#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
+#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
-#define wil_hex_dump_TXRX(prefix_str, prefix_type, rowsize, \
+#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\
prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#define wil_hex_dump_WMI(prefix_str, prefix_type, rowsize, \
+#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\
prefix_type, rowsize, \
@@ -328,6 +327,7 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr, int key_len, const void *key);
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
int wil6210_init_irq(struct wil6210_priv *wil, int irq);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 12915f6e761..0bb3b76b4b5 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -18,8 +18,10 @@
#include <linux/io.h>
#include <linux/list.h>
#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
#include "wil6210.h"
+#include "txrx.h"
#include "wmi.h"
/**
@@ -186,7 +188,6 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
(int)(sizeof(cmd) + len), r->entry_size);
return -ERANGE;
-
}
might_sleep();
@@ -213,7 +214,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
}
/* next head */
next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
- wil_dbg_WMI(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
+ wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
/* wait till FW finish with previous command */
for (retry = 5; retry > 0; retry--) {
r->tail = ioread32(wil->csr + HOST_MBOX +
@@ -234,10 +235,10 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
}
cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
/* set command */
- wil_dbg_WMI(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
- wil_hex_dump_WMI("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
+ wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+ wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
sizeof(cmd), true);
- wil_hex_dump_WMI("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
len, true);
wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
@@ -273,7 +274,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
struct wmi_ready_event *evt = d;
u32 ver = le32_to_cpu(evt->sw_version);
- wil_dbg_WMI(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
+ wil_dbg_wmi(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
if (!is_valid_ether_addr(ndev->dev_addr)) {
memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
@@ -286,7 +287,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
int len)
{
- wil_dbg_WMI(wil, "WMI: FW ready\n");
+ wil_dbg_wmi(wil, "WMI: FW ready\n");
set_bit(wil_status_fwready, &wil->status);
/* reuse wmi_ready for the firmware ready indication */
@@ -309,11 +310,11 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
u32 d_len = le32_to_cpu(data->info.len);
u16 d_status = le16_to_cpu(data->info.status);
- wil_dbg_WMI(wil, "MGMT: channel %d MCS %d SNR %d\n",
+ wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
data->info.channel, data->info.mcs, data->info.snr);
- wil_dbg_WMI(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
+ wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
le16_to_cpu(data->info.stype));
- wil_dbg_WMI(wil, "qid %d mid %d cid %d\n",
+ wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
data->info.qid, data->info.mid, data->info.cid);
if (!channel) {
@@ -329,15 +330,15 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
u.beacon.variable);
- wil_dbg_WMI(wil, "Capability info : 0x%04x\n", cap);
+ wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
tsf, cap, bi, ie_buf, ie_len,
signal, GFP_KERNEL);
if (bss) {
- wil_dbg_WMI(wil, "Added BSS %pM\n",
+ wil_dbg_wmi(wil, "Added BSS %pM\n",
rx_mgmt_frame->bssid);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
} else {
wil_err(wil, "cfg80211_inform_bss() failed\n");
}
@@ -351,7 +352,7 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
struct wmi_scan_complete_event *data = d;
bool aborted = (data->status != 0);
- wil_dbg_WMI(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+ wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
cfg80211_scan_done(wil->scan_request, aborted);
wil->scan_request = NULL;
} else {
@@ -386,9 +387,9 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
return;
}
ch = evt->channel + 1;
- wil_dbg_WMI(wil, "Connect %pM channel [%d] cid %d\n",
+ wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
evt->bssid, ch, evt->cid);
- wil_hex_dump_WMI("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+ wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
evt->assoc_info, len - sizeof(*evt), true);
/* figure out IE's */
@@ -450,14 +451,13 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
{
struct wmi_disconnect_event *evt = d;
- wil_dbg_WMI(wil, "Disconnect %pM reason %d proto %d wmi\n",
+ wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n",
evt->bssid,
evt->protocol_reason_status, evt->disconnect_reason);
wil->sinfo_gen++;
wil6210_disconnect(wil, evt->bssid);
- clear_bit(wil_status_dontscan, &wil->status);
}
static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
@@ -476,7 +476,7 @@ static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
- wil_dbg_WMI(wil, "Link status, MCS %d TSF 0x%016llx\n"
+ wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
"BF status 0x%08x SNR 0x%08x\n"
"Tx Tpt %d goodput %d Rx goodput %d\n"
"Sectors(rx:tx) my %d:%d peer %d:%d\n",
@@ -501,7 +501,7 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
struct sk_buff *skb;
struct ethhdr *eth;
- wil_dbg_WMI(wil, "EAPOL len %d from %pM\n", eapol_len,
+ wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
evt->src_mac);
if (eapol_len > 196) { /* TODO: revisit size limit */
@@ -587,11 +587,9 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
event.wmi) + len, 4),
GFP_KERNEL);
- if (!evt) {
- wil_err(wil, "kmalloc for WMI event (%d) failed\n",
- len);
+ if (!evt)
return;
- }
+
evt->event.hdr = hdr;
cmd = (void *)&evt->event.wmi;
wil_memcpy_fromio_32(cmd, src, len);
@@ -599,15 +597,15 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
iowrite32(0, wil->csr + HOSTADDR(r->tail) +
offsetof(struct wil6210_mbox_ring_desc, sync));
/* indicate */
- wil_dbg_WMI(wil, "Mbox evt %04x %04x %04x %02x\n",
+ wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
hdr.flags);
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
- wil_dbg_WMI(wil, "WMI event 0x%04x\n",
+ wil_dbg_wmi(wil, "WMI event 0x%04x\n",
evt->event.wmi.id);
}
- wil_hex_dump_WMI("evt ", DUMP_PREFIX_OFFSET, 16, 1,
+ wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
&evt->event.hdr, sizeof(hdr) + len, true);
/* advance tail */
@@ -623,7 +621,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
{
int q = queue_work(wil->wmi_wq,
&wil->wmi_event_worker);
- wil_dbg_WMI(wil, "queue_work -> %d\n", q);
+ wil_dbg_wmi(wil, "queue_work -> %d\n", q);
}
}
}
@@ -650,7 +648,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
cmdid, reply_id, to_msec);
rc = -ETIME;
} else {
- wil_dbg_WMI(wil,
+ wil_dbg_wmi(wil,
"wmi_call(0x%04x->0x%04x) completed in %d msec\n",
cmdid, reply_id,
to_msec - jiffies_to_msecs(remain));
@@ -680,7 +678,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
memcpy(cmd.mac, addr, ETH_ALEN);
- wil_dbg_WMI(wil, "Set MAC %pM\n", addr);
+ wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
}
@@ -778,7 +776,7 @@ int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
skb_set_mac_header(skb, 0);
eth = eth_hdr(skb);
- wil_dbg_WMI(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
+ wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
goto found_dest;
@@ -838,10 +836,8 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
- if (!cmd) {
- wil_err(wil, "kmalloc(%d) failed\n", len);
+ if (!cmd)
return -ENOMEM;
- }
cmd->mgmt_frm_type = type;
/* BUG: FW API define ieLen as u8. Will fix FW */
@@ -853,11 +849,60 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
return rc;
}
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_cfg_rx_chain_cmd cmd = {
+ .action = WMI_RX_CHAIN_ADD,
+ .rx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+ .ring_mem_base = cpu_to_le64(vring->pa),
+ .ring_size = cpu_to_le16(vring->size),
+ },
+ .mid = 0, /* TODO - what is it? */
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cfg_rx_chain_done_event evt;
+ } __packed evt;
+ int rc;
+
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
+ if (ch)
+ cmd.sniffer_cfg.channel = ch->hw_value - 1;
+ cmd.sniffer_cfg.phy_info_mode =
+ cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
+ cmd.sniffer_cfg.phy_support =
+ cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
+ ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+ }
+ /* typical time for secure PCP is 840ms */
+ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+ WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+ if (rc)
+ return rc;
+
+ vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
+
+ wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n",
+ le32_to_cpu(evt.evt.status), vring->hwtail);
+
+ if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS)
+ rc = -EINVAL;
+
+ return rc;
+}
+
void wmi_event_flush(struct wil6210_priv *wil)
{
struct pending_wmi_event *evt, *t;
- wil_dbg_WMI(wil, "%s()\n", __func__);
+ wil_dbg_wmi(wil, "%s()\n", __func__);
list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
list_del(&evt->list);
@@ -899,7 +944,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
wmi_evt_call_handler(wil, id, evt_data,
len - sizeof(*wmi));
}
- wil_dbg_WMI(wil, "Complete WMI 0x%04x\n", id);
+ wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id);
complete(&wil->wmi_ready);
return;
}
@@ -964,7 +1009,7 @@ void wmi_connect_worker(struct work_struct *work)
return;
}
- wil_dbg_WMI(wil, "Configure for connection CID %d\n",
+ wil_dbg_wmi(wil, "Configure for connection CID %d\n",
wil->pending_connect_cid);
rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
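wmi_rx_chain_add(), added above, sends WMI_CFG_RX_CHAIN_CMDID for an already-allocated rx vring, waits up to two seconds for WMI_CFG_RX_CHAIN_DONE_EVENTID, and records the firmware-reported tail pointer in vring->hwtail. A hedged caller-side sketch; the function name and the surrounding rx-init flow below are assumptions, only wmi_rx_chain_add() itself comes from this patch:

    /* illustrative caller only -- not part of the patch */
    static int example_rx_chain_setup(struct wil6210_priv *wil,
    				  struct vring *vring)
    {
    	int rc;

    	/* vring->pa and vring->size are assumed to have been filled in by
    	 * a DMA ring allocation step that is not shown here
    	 */
    	rc = wmi_rx_chain_add(wil, vring);
    	if (rc) {
    		wil_err(wil, "Rx chain add failed: %d\n", rc);
    		return rc;
    	}

    	/* on success, vring->hwtail holds the HW tail reported by firmware */
    	return 0;
    }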
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 1a6661a9f00..756e19fc279 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -26,6 +26,7 @@ brcmfmac-objs += \
wl_cfg80211.o \
fwil.o \
fweh.o \
+ p2p.o \
dhd_cdc.o \
dhd_common.o \
dhd_linux.o
@@ -37,4 +38,4 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMDBG) += \
- dhd_dbg.o \ No newline at end of file
+ dhd_dbg.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index a2f32fb990f..ef6f23be6d3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -72,6 +72,7 @@
#define BRCMF_C_SET_WSEC 134
#define BRCMF_C_GET_PHY_NOISE 135
#define BRCMF_C_GET_BSS_INFO 136
+#define BRCMF_C_SET_SCB_TIMEOUT 158
#define BRCMF_C_GET_PHYLIST 180
#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
@@ -149,6 +150,7 @@
#define BRCMF_E_REASON_MINTXRATE 9
#define BRCMF_E_REASON_TXFAIL 10
+#define BRCMF_E_REASON_LINK_BSSCFG_DIS 4
#define BRCMF_E_REASON_FAST_ROAM_FAILED 5
#define BRCMF_E_REASON_DIRECTED_ROAM 6
#define BRCMF_E_REASON_TSPEC_REJECTED 7
@@ -375,6 +377,28 @@ struct brcmf_join_params {
struct brcmf_assoc_params_le params_le;
};
+/* scan params for extended join */
+struct brcmf_join_scan_params_le {
+ u8 scan_type; /* 0 use default, active or passive scan */
+ __le32 nprobes; /* -1 use default, nr of probes per channel */
+ __le32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ __le32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ __le32 home_time; /* -1 use default, dwell time for the home
+ * channel between channel scans
+ */
+};
+
+/* extended join params */
+struct brcmf_ext_join_params_le {
+ struct brcmf_ssid_le ssid_le; /* {0, ""}: wildcard scan */
+ struct brcmf_join_scan_params_le scan_le;
+ struct brcmf_assoc_params_le assoc_le;
+};
+
struct brcmf_wsec_key {
u32 index; /* key index */
u32 len; /* key length */
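The new extended-join structures use -1 as a "use firmware default" sentinel for each scan parameter, as the field comments note. A hedged sketch of filling the scan portion with those defaults; the helper name is hypothetical and the iovar that would carry the structure is not shown by this patch:

    /* illustrative only -- not part of the patch */
    static void example_fill_join_scan(struct brcmf_join_scan_params_le *scan)
    {
    	scan->scan_type = 0;			/* 0: use default scan type */
    	scan->nprobes = cpu_to_le32(-1);	/* default probes per channel */
    	scan->active_time = cpu_to_le32(-1);	/* default active dwell time */
    	scan->passive_time = cpu_to_le32(-1);	/* default passive dwell time */
    	scan->home_time = cpu_to_le32(-1);	/* default home channel time */
    }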
@@ -451,6 +475,19 @@ struct brcmf_sta_info_le {
__le32 rx_decrypt_failures; /* # of packet decrypted failed */
};
+/*
+ * WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+struct brcmf_rx_mgmt_data {
+ __be16 version;
+ __be16 chanspec;
+ __be32 rssi;
+ __be32 mactime;
+ __be32 rate;
+};
+
/* Bus independent dongle command */
struct brcmf_dcmd {
uint cmd; /* common dongle cmd definition */
@@ -489,9 +526,6 @@ struct brcmf_pub {
struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
- atomic_t pend_8021x_cnt;
- wait_queue_head_t pend_8021x_wait;
-
struct brcmf_fweh_info fweh;
#ifdef DEBUG
struct dentry *dbgfs_dir;
@@ -515,9 +549,11 @@ struct brcmf_cfg80211_vif;
* @vif: points to cfg80211 specific interface information.
* @ndev: associated network device.
* @stats: interface specific network statistics.
- * @idx: interface index in device firmware.
+ * @ifidx: interface index in device firmware.
* @bssidx: index of bss associated with this interface.
* @mac_addr: assigned mac address.
+ * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
+ * @pend_8021x_wait: used for signalling change in count.
*/
struct brcmf_if {
struct brcmf_pub *drvr;
@@ -526,9 +562,11 @@ struct brcmf_if {
struct net_device_stats stats;
struct work_struct setmacaddr_work;
struct work_struct multicast_work;
- int idx;
+ int ifidx;
s32 bssidx;
u8 mac_addr[ETH_ALEN];
+ atomic_t pend_8021x_cnt;
+ wait_queue_head_t pend_8021x_wait;
};
@@ -547,9 +585,10 @@ extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
struct sk_buff *rxp);
-extern int brcmf_net_attach(struct brcmf_if *ifp);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx,
- s32 bssidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
+extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
+ s32 ifidx, char *name, u8 *mac_addr);
+extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 64c38f4226a..ad25c3408b5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -24,18 +24,6 @@ enum brcmf_bus_state {
BRCMF_BUS_DATA /* Ready for frame transfers */
};
-struct dngl_stats {
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes; /* total bytes received */
- unsigned long tx_bytes; /* total bytes transmitted */
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* packets dropped by dongle */
- unsigned long tx_dropped; /* packets dropped by dongle */
- unsigned long multicast; /* multicast packets received */
-};
-
struct brcmf_bus_dcmd {
char *name;
char *param;
@@ -72,11 +60,12 @@ struct brcmf_bus_ops {
* @drvr: public driver information.
* @state: operational state of the bus interface.
* @maxctl: maximum size for rxctl request message.
- * @drvr_up: indicates driver up/down status.
* @tx_realloc: number of tx packets realloced for headroom.
* @dstats: dongle-based statistical data.
* @align: alignment requirement for the bus.
* @dcmd_list: bus/device specific dongle initialization commands.
+ * @chip: device identifier of the dongle chip.
+ * @chiprev: revision of the dongle chip.
*/
struct brcmf_bus {
union {
@@ -87,10 +76,10 @@ struct brcmf_bus {
struct brcmf_pub *drvr;
enum brcmf_bus_state state;
uint maxctl;
- bool drvr_up;
unsigned long tx_realloc;
- struct dngl_stats dstats;
u8 align;
+ u32 chip;
+ u32 chiprev;
struct list_head dcmd_list;
struct brcmf_bus_ops *ops;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index bb454cdab29..a2354d951dd 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -303,6 +303,14 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
return -EBADE;
}
+ /* The ifidx is the idx to map to matching netdev/ifp. When receiving
+ * events this is easy because it contains the bssidx which maps
+ * 1-on-1 to the netdev/ifp. But for data frames only the ifidx is
+ * received. bssidx 1 is used for p2p0 and no data can be received or
+ * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0
+ */
+ if (*ifidx)
+ (*ifidx)++;
if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
BDC_PROTO_VER) {
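The comment added above documents the index adjustment for received data frames: iflist[] is indexed by bssidx, bssidx 1 is reserved for p2p0 (which never carries data), so a non-zero data ifidx is shifted up by one. A standalone illustration of that mapping:

    #include <stdio.h>

    /* mirrors the adjustment above: data ifidx 0 stays 0, any other data
     * ifidx maps to bssidx = ifidx + 1 because bssidx 1 is taken by p2p0
     */
    static int example_data_ifidx_to_bssidx(int ifidx)
    {
    	return ifidx ? ifidx + 1 : 0;
    }

    int main(void)
    {
    	int ifidx;

    	for (ifidx = 0; ifidx < 3; ifidx++)
    		printf("data ifidx %d -> bssidx %d\n", ifidx,
    		       example_data_ifidx_to_bssidx(ifidx));
    	return 0;
    }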
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 14b8fdde695..c06cea88df0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -26,6 +26,8 @@
#include "dhd_bus.h"
#include "dhd_proto.h"
#include "dhd_dbg.h"
+#include "fwil_types.h"
+#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
@@ -40,6 +42,12 @@ MODULE_LICENSE("Dual BSD/GPL");
int brcmf_msg_level;
module_param(brcmf_msg_level, int, 0);
+/* P2P0 enable */
+static int brcmf_p2p_enable;
+#ifdef CONFIG_BRCMDBG
+module_param_named(p2pon, brcmf_p2p_enable, int, 0);
+MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
+#endif
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
@@ -70,9 +78,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
u32 buflen;
s32 err;
- brcmf_dbg(TRACE, "enter\n");
-
ifp = container_of(work, struct brcmf_if, multicast_work);
+
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
ndev = ifp->ndev;
/* Determine initial value of allmulti flag */
@@ -129,9 +138,10 @@ _brcmf_set_mac_address(struct work_struct *work)
struct brcmf_if *ifp;
s32 err;
- brcmf_dbg(TRACE, "enter\n");
-
ifp = container_of(work, struct brcmf_if, setmacaddr_work);
+
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
ETH_ALEN);
if (err < 0) {
@@ -168,7 +178,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
struct brcmf_pub *drvr = ifp->drvr;
struct ethhdr *eh;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
/* Can the device send data? */
if (drvr->bus_if->state != BRCMF_BUS_DATA) {
@@ -179,8 +189,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
goto done;
}
- if (!drvr->iflist[ifp->idx]) {
- brcmf_err("bad ifidx %d\n", ifp->idx);
+ if (!drvr->iflist[ifp->bssidx]) {
+ brcmf_err("bad ifidx %d\n", ifp->bssidx);
netif_stop_queue(ndev);
dev_kfree_skb(skb);
ret = -ENODEV;
@@ -192,14 +202,14 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
struct sk_buff *skb2;
brcmf_dbg(INFO, "%s: insufficient headroom\n",
- brcmf_ifname(drvr, ifp->idx));
+ brcmf_ifname(drvr, ifp->bssidx));
drvr->bus_if->tx_realloc++;
skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
dev_kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
brcmf_err("%s: skb_realloc_headroom failed\n",
- brcmf_ifname(drvr, ifp->idx));
+ brcmf_ifname(drvr, ifp->bssidx));
ret = -ENOMEM;
goto done;
}
@@ -217,19 +227,21 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
if (is_multicast_ether_addr(eh->h_dest))
drvr->tx_multicast++;
if (ntohs(eh->h_proto) == ETH_P_PAE)
- atomic_inc(&drvr->pend_8021x_cnt);
+ atomic_inc(&ifp->pend_8021x_cnt);
/* If the protocol uses a data header, apply it */
- brcmf_proto_hdrpush(drvr, ifp->idx, skb);
+ brcmf_proto_hdrpush(drvr, ifp->ifidx, skb);
/* Use bus module to send data frame */
ret = brcmf_bus_txdata(drvr->bus_if, skb);
done:
- if (ret)
- drvr->bus_if->dstats.tx_dropped++;
- else
- drvr->bus_if->dstats.tx_packets++;
+ if (ret) {
+ ifp->stats.tx_dropped++;
+ } else {
+ ifp->stats.tx_packets++;
+ ifp->stats.tx_bytes += skb->len;
+ }
/* Return ok: we always eat the packet */
return NETDEV_TX_OK;
@@ -270,12 +282,13 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
skb_queue_walk_safe(skb_list, skb, pnext) {
skb_unlink(skb, skb_list);
- /* process and remove protocol-specific header
- */
+ /* process and remove protocol-specific header */
ret = brcmf_proto_hdrpull(drvr, &ifidx, skb);
- if (ret < 0) {
- if (ret != -ENODATA)
- bus_if->dstats.rx_errors++;
+ ifp = drvr->iflist[ifidx];
+
+ if (ret || !ifp || !ifp->ndev) {
+ if ((ret != -ENODATA) && ifp)
+ ifp->stats.rx_errors++;
brcmu_pkt_buf_free_skb(skb);
continue;
}
@@ -295,21 +308,11 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
eth = skb->data;
len = skb->len;
- ifp = drvr->iflist[ifidx];
- if (ifp == NULL)
- ifp = drvr->iflist[0];
-
- if (!ifp || !ifp->ndev ||
- ifp->ndev->reg_state != NETREG_REGISTERED) {
- brcmu_pkt_buf_free_skb(skb);
- continue;
- }
-
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST)
- bus_if->dstats.multicast++;
+ ifp->stats.multicast++;
skb->data = eth;
skb->len = len;
@@ -325,8 +328,13 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
ifp->ndev->last_rx = jiffies;
}
- bus_if->dstats.rx_bytes += skb->len;
- bus_if->dstats.rx_packets++; /* Local count */
+ if (!(ifp->ndev->flags & IFF_UP)) {
+ brcmu_pkt_buf_free_skb(skb);
+ continue;
+ }
+
+ ifp->stats.rx_bytes += skb->len;
+ ifp->stats.rx_packets++;
if (in_interrupt())
netif_rx(skb);
@@ -348,36 +356,31 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
u16 type;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_if *ifp;
brcmf_proto_hdrpull(drvr, &ifidx, txp);
+ ifp = drvr->iflist[ifidx];
+ if (!ifp)
+ return;
+
eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto);
if (type == ETH_P_PAE) {
- atomic_dec(&drvr->pend_8021x_cnt);
- if (waitqueue_active(&drvr->pend_8021x_wait))
- wake_up(&drvr->pend_8021x_wait);
+ atomic_dec(&ifp->pend_8021x_cnt);
+ if (waitqueue_active(&ifp->pend_8021x_wait))
+ wake_up(&ifp->pend_8021x_wait);
}
+ if (!success)
+ ifp->stats.tx_errors++;
}
static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_bus *bus_if = ifp->drvr->bus_if;
-
- brcmf_dbg(TRACE, "Enter\n");
- /* Copy dongle stats to net device stats */
- ifp->stats.rx_packets = bus_if->dstats.rx_packets;
- ifp->stats.tx_packets = bus_if->dstats.tx_packets;
- ifp->stats.rx_bytes = bus_if->dstats.rx_bytes;
- ifp->stats.tx_bytes = bus_if->dstats.tx_bytes;
- ifp->stats.rx_errors = bus_if->dstats.rx_errors;
- ifp->stats.tx_errors = bus_if->dstats.tx_errors;
- ifp->stats.rx_dropped = bus_if->dstats.rx_dropped;
- ifp->stats.tx_dropped = bus_if->dstats.tx_dropped;
- ifp->stats.multicast = bus_if->dstats.multicast;
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
return &ifp->stats;
}
@@ -431,7 +434,7 @@ static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
u32 toe_cmpnt, csum_dir;
int ret;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
/* all ethtool calls start with a cmd word */
if (copy_from_user(&cmd, uaddr, sizeof(u32)))
@@ -454,13 +457,7 @@ static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
sprintf(info.driver, "dhd");
strcpy(info.version, BRCMF_VERSION_STR);
}
-
- /* otherwise, require dongle to be up */
- else if (!drvr->bus_if->drvr_up) {
- brcmf_err("dongle is not up\n");
- return -ENODEV;
- }
- /* finally, report dongle driver type */
+ /* report dongle driver type */
else
sprintf(info.driver, "wl");
@@ -534,9 +531,9 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd);
+ brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
- if (!drvr->iflist[ifp->idx])
+ if (!drvr->iflist[ifp->bssidx])
return -1;
if (cmd == SIOCETHTOOL)
@@ -548,17 +545,12 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
static int brcmf_netdev_stop(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
-
- brcmf_dbg(TRACE, "Enter\n");
- if (drvr->bus_if->drvr_up == 0)
- return 0;
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
brcmf_cfg80211_down(ndev);
/* Set state and stop OS transmissions */
- drvr->bus_if->drvr_up = false;
netif_stop_queue(ndev);
return 0;
@@ -572,7 +564,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
u32 toe_ol;
s32 ret = 0;
- brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
/* If bus is not ready, can't continue */
if (bus_if->state != BRCMF_BUS_DATA) {
@@ -580,9 +572,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
return -EAGAIN;
}
- atomic_set(&drvr->pend_8021x_cnt, 0);
-
- memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
+ atomic_set(&ifp->pend_8021x_cnt, 0);
/* Get current TOE mode from dongle */
if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
@@ -593,7 +583,6 @@ static int brcmf_netdev_open(struct net_device *ndev)
/* Allow transmit calls */
netif_start_queue(ndev);
- drvr->bus_if->drvr_up = true;
if (brcmf_cfg80211_up(ndev)) {
brcmf_err("failed to bring up cfg80211\n");
return -1;
@@ -612,29 +601,18 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
-static const struct net_device_ops brcmf_netdev_ops_virt = {
- .ndo_open = brcmf_cfg80211_up,
- .ndo_stop = brcmf_cfg80211_down,
- .ndo_get_stats = brcmf_netdev_get_stats,
- .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
- .ndo_start_xmit = brcmf_netdev_start_xmit,
- .ndo_set_mac_address = brcmf_netdev_set_mac_address,
- .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
-};
-
-int brcmf_net_attach(struct brcmf_if *ifp)
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
struct brcmf_pub *drvr = ifp->drvr;
struct net_device *ndev;
+ s32 err;
- brcmf_dbg(TRACE, "ifidx %d mac %pM\n", ifp->idx, ifp->mac_addr);
+ brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
+ ifp->mac_addr);
ndev = ifp->ndev;
/* set appropriate operations */
- if (!ifp->idx)
- ndev->netdev_ops = &brcmf_netdev_ops_pri;
- else
- ndev->netdev_ops = &brcmf_netdev_ops_virt;
+ ndev->netdev_ops = &brcmf_netdev_ops_pri;
ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
@@ -645,7 +623,14 @@ int brcmf_net_attach(struct brcmf_if *ifp)
/* set the mac address */
memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
- if (register_netdev(ndev) != 0) {
+ INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
+ INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
+
+ if (rtnl_locked)
+ err = register_netdevice(ndev);
+ else
+ err = register_netdev(ndev);
+ if (err != 0) {
brcmf_err("couldn't register the net device\n");
goto fail;
}
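brcmf_net_attach() now takes an rtnl_locked flag so that callers which already hold the RTNL lock can register the netdev without deadlocking: register_netdevice() expects RTNL to be held by the caller, while register_netdev() acquires it internally. A hedged sketch of that selection (the helper name is hypothetical):

    /* illustrative only -- not part of the patch */
    static int example_register_ndev(struct net_device *ndev, bool rtnl_locked)
    {
    	/* register_netdevice() requires the caller to hold RTNL;
    	 * register_netdev() takes and releases RTNL itself
    	 */
    	if (rtnl_locked)
    		return register_netdevice(ndev);

    	return register_netdev(ndev);
    }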
@@ -659,16 +644,78 @@ fail:
return -EBADE;
}
-struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
- char *name, u8 *addr_mask)
+static int brcmf_net_p2p_open(struct net_device *ndev)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ return brcmf_cfg80211_up(ndev);
+}
+
+static int brcmf_net_p2p_stop(struct net_device *ndev)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ return brcmf_cfg80211_down(ndev);
+}
+
+static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
+ struct ifreq *ifr, int cmd)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+ return 0;
+}
+
+static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops brcmf_netdev_ops_p2p = {
+ .ndo_open = brcmf_net_p2p_open,
+ .ndo_stop = brcmf_net_p2p_stop,
+ .ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
+ .ndo_start_xmit = brcmf_net_p2p_start_xmit
+};
+
+static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
+{
+ struct net_device *ndev;
+
+ brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
+ ifp->mac_addr);
+ ndev = ifp->ndev;
+
+ ndev->netdev_ops = &brcmf_netdev_ops_p2p;
+
+ /* set the mac address */
+ memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+
+ if (register_netdev(ndev) != 0) {
+ brcmf_err("couldn't register the p2p net device\n");
+ goto fail;
+ }
+
+ brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
+
+ return 0;
+
+fail:
+ return -EBADE;
+}
+
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+ char *name, u8 *mac_addr)
{
struct brcmf_if *ifp;
struct net_device *ndev;
- int i;
- brcmf_dbg(TRACE, "idx %d\n", ifidx);
+ brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);
- ifp = drvr->iflist[ifidx];
+ ifp = drvr->iflist[bssidx];
/*
* Delete the existing interface before overwriting it
* in case we missed the BRCMF_E_IF_DEL event.
@@ -680,7 +727,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
netif_stop_queue(ifp->ndev);
unregister_netdev(ifp->ndev);
free_netdev(ifp->ndev);
- drvr->iflist[ifidx] = NULL;
+ drvr->iflist[bssidx] = NULL;
} else {
brcmf_err("ignore IF event\n");
return ERR_PTR(-EINVAL);
@@ -697,16 +744,15 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
ifp = netdev_priv(ndev);
ifp->ndev = ndev;
ifp->drvr = drvr;
- drvr->iflist[ifidx] = ifp;
- ifp->idx = ifidx;
+ drvr->iflist[bssidx] = ifp;
+ ifp->ifidx = ifidx;
ifp->bssidx = bssidx;
- INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
- INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
- if (addr_mask != NULL)
- for (i = 0; i < ETH_ALEN; i++)
- ifp->mac_addr[i] = drvr->mac[i] ^ addr_mask[i];
+ init_waitqueue_head(&ifp->pend_8021x_wait);
+
+ if (mac_addr != NULL)
+ memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
current->pid, ifp->ndev->name, ifp->mac_addr);
@@ -714,19 +760,18 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
return ifp;
}
-void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
struct brcmf_if *ifp;
- brcmf_dbg(TRACE, "idx %d\n", ifidx);
-
- ifp = drvr->iflist[ifidx];
+ ifp = drvr->iflist[bssidx];
if (!ifp) {
- brcmf_err("Null interface\n");
+ brcmf_err("Null interface, idx=%d\n", bssidx);
return;
}
+ brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
if (ifp->ndev) {
- if (ifidx == 0) {
+ if (bssidx == 0) {
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
rtnl_lock();
brcmf_netdev_stop(ifp->ndev);
@@ -736,12 +781,14 @@ void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
netif_stop_queue(ifp->ndev);
}
- cancel_work_sync(&ifp->setmacaddr_work);
- cancel_work_sync(&ifp->multicast_work);
+ if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+ cancel_work_sync(&ifp->setmacaddr_work);
+ cancel_work_sync(&ifp->multicast_work);
+ }
unregister_netdev(ifp->ndev);
- drvr->iflist[ifidx] = NULL;
- if (ifidx == 0)
+ drvr->iflist[bssidx] = NULL;
+ if (bssidx == 0)
brcmf_cfg80211_detach(drvr->config);
free_netdev(ifp->ndev);
}
@@ -781,8 +828,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
- init_waitqueue_head(&drvr->pend_8021x_wait);
-
return ret;
fail:
@@ -797,6 +842,7 @@ int brcmf_bus_start(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
struct brcmf_if *ifp;
+ struct brcmf_if *p2p_ifp;
brcmf_dbg(TRACE, "\n");
@@ -812,6 +858,13 @@ int brcmf_bus_start(struct device *dev)
if (IS_ERR(ifp))
return PTR_ERR(ifp);
+ if (brcmf_p2p_enable)
+ p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
+ else
+ p2p_ifp = NULL;
+ if (IS_ERR(p2p_ifp))
+ p2p_ifp = NULL;
+
/* signal bus ready */
bus_if->state = BRCMF_BUS_DATA;
@@ -830,16 +883,22 @@ int brcmf_bus_start(struct device *dev)
if (ret < 0)
goto fail;
- ret = brcmf_net_attach(ifp);
+ ret = brcmf_net_attach(ifp, false);
fail:
if (ret < 0) {
brcmf_err("failed: %d\n", ret);
if (drvr->config)
brcmf_cfg80211_detach(drvr->config);
- free_netdev(drvr->iflist[0]->ndev);
+ free_netdev(ifp->ndev);
drvr->iflist[0] = NULL;
+ if (p2p_ifp) {
+ free_netdev(p2p_ifp->ndev);
+ drvr->iflist[1] = NULL;
+ }
return ret;
}
+ if ((brcmf_p2p_enable) && (p2p_ifp))
+ brcmf_net_p2p_attach(p2p_ifp);
return 0;
}
@@ -865,12 +924,13 @@ void brcmf_dev_reset(struct device *dev)
if (drvr == NULL)
return;
- brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
+ if (drvr->iflist[0])
+ brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
}
void brcmf_detach(struct device *dev)
{
- int i;
+ s32 i;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
@@ -897,19 +957,18 @@ void brcmf_detach(struct device *dev)
kfree(drvr);
}
-static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr)
+static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
- return atomic_read(&drvr->pend_8021x_cnt);
+ return atomic_read(&ifp->pend_8021x_cnt);
}
int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
int err;
- err = wait_event_timeout(drvr->pend_8021x_wait,
- !brcmf_get_pend_8021x_cnt(drvr),
+ err = wait_event_timeout(ifp->pend_8021x_wait,
+ !brcmf_get_pend_8021x_cnt(ifp),
msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
WARN_ON(!err);
@@ -917,6 +976,16 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
return !err;
}
+/*
+ * return chip id and rev of the device encoded in u32.
+ */
+u32 brcmf_get_chip_info(struct brcmf_if *ifp)
+{
+ struct brcmf_bus *bus = ifp->drvr->bus_if;
+
+ return bus->chip << 4 | bus->chiprev;
+}
+
static void brcmf_driver_init(struct work_struct *work)
{
brcmf_debugfs_init();
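brcmf_get_chip_info(), added above, packs the dongle chip id and revision into a single u32 as (chip << 4) | chiprev, with the revision in the low 4 bits. A standalone illustration of the encoding, plus a decode that assumes the revision fits in those 4 bits (example values only):

    #include <stdio.h>

    /* same packing as brcmf_get_chip_info(): chip id in the upper bits,
     * revision in the low 4 bits
     */
    static unsigned int example_encode_chip_info(unsigned int chip,
    					     unsigned int chiprev)
    {
    	return chip << 4 | chiprev;
    }

    int main(void)
    {
    	unsigned int v = example_encode_chip_info(0x4330, 3);	/* example values */

    	printf("encoded 0x%08x -> chip 0x%x rev %u\n", v, v >> 4, v & 0xf);
    	return 0;
    }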
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 7fef9b5ba00..4469321c0eb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -1096,7 +1096,6 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
type != BRCMF_SDIO_FT_SUPER) {
brcmf_err("HW header length too long\n");
- bus->sdiodev->bus_if->dstats.rx_errors++;
bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
rd->len = 0;
@@ -1298,7 +1297,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (errcode < 0) {
brcmf_err("glom read of %d bytes failed: %d\n",
dlen, errcode);
- bus->sdiodev->bus_if->dstats.rx_errors++;
sdio_claim_host(bus->sdiodev->func[1]);
if (bus->glomerr++ < 3) {
@@ -1445,10 +1443,9 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if (bus->rxblen)
buf = vzalloc(bus->rxblen);
- if (!buf) {
- brcmf_err("no memory for control frame\n");
+ if (!buf)
goto done;
- }
+
rbuf = bus->rxbuf;
pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
if (pad)
@@ -1478,7 +1475,6 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
rdlen, bus->sdiodev->bus_if->maxctl);
- bus->sdiodev->bus_if->dstats.rx_errors++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
@@ -1486,7 +1482,6 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
len, len - doff, bus->sdiodev->bus_if->maxctl);
- bus->sdiodev->bus_if->dstats.rx_errors++;
bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
@@ -1634,7 +1629,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (!pkt) {
/* Give up on data, request rtx of events */
brcmf_err("brcmu_pkt_buf_get_skb failed\n");
- bus->sdiodev->bus_if->dstats.rx_dropped++;
brcmf_sdbrcm_rxfail(bus, false,
RETRYCHAN(rd->channel));
sdio_release_host(bus->sdiodev->func[1]);
@@ -1652,7 +1646,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_err("read %d bytes from channel %d failed: %d\n",
rd->len, rd->channel, sdret);
brcmu_pkt_buf_free_skb(pkt);
- bus->sdiodev->bus_if->dstats.rx_errors++;
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_rxfail(bus, true,
RETRYCHAN(rd->channel));
@@ -1940,10 +1933,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
datalen = pkt->len - SDPCM_HDRLEN;
ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
- if (ret)
- bus->sdiodev->bus_if->dstats.tx_errors++;
- else
- bus->sdiodev->bus_if->dstats.tx_bytes += datalen;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
@@ -1962,8 +1951,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
}
/* Deflow-control stack if needed */
- if (bus->sdiodev->bus_if->drvr_up &&
- (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
+ if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
bus->txoff = false;
brcmf_txflowblock(bus->sdiodev->dev, false);
@@ -2710,9 +2698,10 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
* address of sdpcm_shared structure
*/
sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
(u8 *)&addr_le, 4);
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
@@ -2731,10 +2720,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
}
/* Read hndrte_shared structure */
- sdio_claim_host(bus->sdiodev->func[1]);
rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
sizeof(struct sdpcm_shared_le));
- sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
@@ -2836,14 +2823,12 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
return 0;
- sdio_claim_host(bus->sdiodev->func[1]);
error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
sizeof(struct brcmf_trap_info));
if (error < 0)
return error;
nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
- sdio_release_host(bus->sdiodev->func[1]);
if (nbytes < 0)
return nbytes;
@@ -3308,9 +3293,6 @@ static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
int ret;
- if (bus->sdiodev->bus_if->drvr_up)
- return -EISCONN;
-
ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
@@ -3941,6 +3923,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
/* Assign bus interface call back */
bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
+ bus->sdiodev->bus_if->chip = bus->ci->chip;
+ bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
/* Attach to the brcmf/OS/network interface */
ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index ba0b22512f1..e9d6f91a1f2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -189,24 +189,24 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
return;
}
- ifp = drvr->iflist[ifevent->ifidx];
+ ifp = drvr->iflist[ifevent->bssidx];
if (ifevent->action == BRCMF_E_IF_ADD) {
brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname,
emsg->addr);
- ifp = brcmf_add_if(drvr, ifevent->ifidx, ifevent->bssidx,
+ ifp = brcmf_add_if(drvr, ifevent->bssidx, ifevent->ifidx,
emsg->ifname, emsg->addr);
if (IS_ERR(ifp))
return;
if (!drvr->fweh.evt_handler[BRCMF_E_IF])
- err = brcmf_net_attach(ifp);
+ err = brcmf_net_attach(ifp, false);
}
err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
if (ifevent->action == BRCMF_E_IF_DEL)
- brcmf_del_if(drvr, ifevent->ifidx);
+ brcmf_del_if(drvr, ifevent->bssidx);
}
/**
@@ -250,8 +250,6 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
drvr = container_of(fweh, struct brcmf_pub, fweh);
while ((event = brcmf_fweh_dequeue_event(fweh))) {
- ifp = drvr->iflist[event->ifidx];
-
brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
brcmf_fweh_event_name(event->code), event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
@@ -283,6 +281,7 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
goto event_free;
}
+ ifp = drvr->iflist[emsg.bsscfgidx];
err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg,
event->data);
if (err) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 36901f76a3b..8c39b51dccc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -83,6 +83,7 @@ struct brcmf_event;
BRCMF_ENUM_DEF(MULTICAST_DECODE_ERROR, 51) \
BRCMF_ENUM_DEF(TRACE, 52) \
BRCMF_ENUM_DEF(IF, 54) \
+ BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \
BRCMF_ENUM_DEF(RSSI, 56) \
BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \
BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
@@ -96,8 +97,11 @@ struct brcmf_event;
BRCMF_ENUM_DEF(DFS_AP_RESUME, 66) \
BRCMF_ENUM_DEF(ESCAN_RESULT, 69) \
BRCMF_ENUM_DEF(ACTION_FRAME_OFF_CHAN_COMPLETE, 70) \
+ BRCMF_ENUM_DEF(PROBERESP_MSG, 71) \
+ BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
- BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74)
+ BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
+ BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index d8d8b6549dc..8d1def935b8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -45,9 +45,10 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
if (data != NULL)
len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
if (set)
- err = brcmf_proto_cdc_set_dcmd(drvr, ifp->idx, cmd, data, len);
+ err = brcmf_proto_cdc_set_dcmd(drvr, ifp->ifidx, cmd, data,
+ len);
else
- err = brcmf_proto_cdc_query_dcmd(drvr, ifp->idx, cmd, data,
+ err = brcmf_proto_cdc_query_dcmd(drvr, ifp->ifidx, cmd, data,
len);
if (err >= 0)
@@ -100,6 +101,7 @@ brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
__le32 data_le = cpu_to_le32(data);
mutex_lock(&ifp->drvr->proto_block);
+ brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, data);
err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
mutex_unlock(&ifp->drvr->proto_block);
@@ -116,6 +118,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
mutex_unlock(&ifp->drvr->proto_block);
*data = le32_to_cpu(data_le);
+ brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, *data);
return err;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
new file mode 100644
index 00000000000..0f2c83bc95d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef FWIL_TYPES_H_
+#define FWIL_TYPES_H_
+
+#include <linux/if_ether.h>
+
+
+#define BRCMF_FIL_ACTION_FRAME_SIZE 1800
+
+
+enum brcmf_fil_p2p_if_types {
+ BRCMF_FIL_P2P_IF_CLIENT,
+ BRCMF_FIL_P2P_IF_GO,
+ BRCMF_FIL_P2P_IF_DYNBCN_GO,
+ BRCMF_FIL_P2P_IF_DEV,
+};
+
+struct brcmf_fil_p2p_if_le {
+ u8 addr[ETH_ALEN];
+ __le16 type;
+ __le16 chspec;
+};
+
+struct brcmf_fil_chan_info_le {
+ __le32 hw_channel;
+ __le32 target_channel;
+ __le32 scan_channel;
+};
+
+struct brcmf_fil_action_frame_le {
+ u8 da[ETH_ALEN];
+ __le16 len;
+ __le32 packet_id;
+ u8 data[BRCMF_FIL_ACTION_FRAME_SIZE];
+};
+
+struct brcmf_fil_af_params_le {
+ __le32 channel;
+ __le32 dwell_time;
+ u8 bssid[ETH_ALEN];
+ u8 pad[2];
+ struct brcmf_fil_action_frame_le action_frame;
+};
+
+struct brcmf_fil_bss_enable_le {
+ __le32 bsscfg_idx;
+ __le32 enable;
+};
+
+#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
new file mode 100644
index 00000000000..4166e642068
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -0,0 +1,2277 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <net/cfg80211.h>
+
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <defs.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include "fwil.h"
+#include "fwil_types.h"
+#include "p2p.h"
+#include "wl_cfg80211.h"
+
+/* parameters used for p2p escan */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
+
+#define BRCMF_P2P_WILDCARD_SSID "DIRECT-"
+#define BRCMF_P2P_WILDCARD_SSID_LEN (sizeof(BRCMF_P2P_WILDCARD_SSID) - 1)
+
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
+ (channel == SOCIAL_CHAN_2) || \
+ (channel == SOCIAL_CHAN_3))
+#define SOCIAL_CHAN_CNT 3
+#define AF_PEER_SEARCH_CNT 2
+
+#define BRCMF_SCB_TIMEOUT_VALUE 20
+
+#define P2P_VER 9 /* P2P version: 9=WiFi P2P v1.0 */
+#define P2P_PUB_AF_CATEGORY 0x04
+#define P2P_PUB_AF_ACTION 0x09
+#define P2P_AF_CATEGORY 0x7f
+#define P2P_OUI "\x50\x6F\x9A" /* P2P OUI */
+#define P2P_OUI_LEN 3 /* P2P OUI length */
+
+/* Action Frame Constants */
+#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action */
+#define DOT11_ACTION_CAT_OFF 0 /* category offset */
+#define DOT11_ACTION_ACT_OFF 1 /* action offset */
+
+#define P2P_AF_DWELL_TIME 200
+#define P2P_AF_MIN_DWELL_TIME 100
+#define P2P_AF_MED_DWELL_TIME 400
+#define P2P_AF_LONG_DWELL_TIME 1000
+#define P2P_AF_TX_MAX_RETRY 1
+#define P2P_AF_MAX_WAIT_TIME 2000
+#define P2P_INVALID_CHANNEL -1
+#define P2P_CHANNEL_SYNC_RETRY 5
+#define P2P_AF_FRM_SCAN_MAX_WAIT 1500
+#define P2P_DEFAULT_SLEEP_TIME_VSDB 200
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */
+
+/* P2P Service Discovery related */
+#define P2PSD_ACTION_CATEGORY 0x04 /* Public action frame */
+#define P2PSD_ACTION_ID_GAS_IREQ 0x0a /* GAS Initial Request AF */
+#define P2PSD_ACTION_ID_GAS_IRESP 0x0b /* GAS Initial Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comeback Response AF */
+
+/**
+ * struct brcmf_p2p_disc_st_le - set discovery state in firmware.
+ *
+ * @state: requested discovery state (see enum brcmf_p2p_disc_state).
+ * @chspec: channel parameter for %WL_P2P_DISC_ST_LISTEN state.
+ * @dwell: dwell time in ms for %WL_P2P_DISC_ST_LISTEN state.
+ */
+struct brcmf_p2p_disc_st_le {
+ u8 state;
+ __le16 chspec;
+ __le16 dwell;
+};
+
+/**
+ * enum brcmf_p2p_disc_state - P2P discovery state values
+ *
+ * @WL_P2P_DISC_ST_SCAN: P2P discovery with wildcard SSID and P2P IE.
+ * @WL_P2P_DISC_ST_LISTEN: P2P discovery off-channel for specified time.
+ * @WL_P2P_DISC_ST_SEARCH: P2P discovery with P2P wildcard SSID and P2P IE.
+ */
+enum brcmf_p2p_disc_state {
+ WL_P2P_DISC_ST_SCAN,
+ WL_P2P_DISC_ST_LISTEN,
+ WL_P2P_DISC_ST_SEARCH
+};
+
+/**
+ * struct brcmf_p2p_scan_le - P2P specific scan request.
+ *
+ * @type: type of scan method requested (values: 'E' or 'S').
+ * @reserved: reserved (ignored).
+ * @eparams: parameters used for type 'E'.
+ * @sparams: parameters used for type 'S'.
+ */
+struct brcmf_p2p_scan_le {
+ u8 type;
+ u8 reserved[3];
+ union {
+ struct brcmf_escan_params_le eparams;
+ struct brcmf_scan_params_le sparams;
+ };
+};
+
+/**
+ * struct brcmf_p2p_pub_act_frame - WiFi P2P Public Action Frame
+ *
+ * @category: P2P_PUB_AF_CATEGORY
+ * @action: P2P_PUB_AF_ACTION
+ * @oui[3]: P2P_OUI
+ * @oui_type: OUI type - P2P_VER
+ * @subtype: OUI subtype - P2P_TYPE_*
+ * @dialog_token: nonzero, identifies req/rsp transaction
+ * @elts[1]: Variable length information elements.
+ */
+struct brcmf_p2p_pub_act_frame {
+ u8 category;
+ u8 action;
+ u8 oui[3];
+ u8 oui_type;
+ u8 subtype;
+ u8 dialog_token;
+ u8 elts[1];
+};
+
+/**
+ * struct brcmf_p2p_action_frame - WiFi P2P Action Frame
+ *
+ * @category: P2P_AF_CATEGORY
+ * @oui[3]: OUI - P2P_OUI
+ * @type: OUI Type - P2P_VER
+ * @subtype: OUI Subtype - P2P_AF_*
+ * @dialog_token: nonzero, identifies req/resp transaction
+ * @elts[1]: Variable length information elements.
+ */
+struct brcmf_p2p_action_frame {
+ u8 category;
+ u8 oui[3];
+ u8 type;
+ u8 subtype;
+ u8 dialog_token;
+ u8 elts[1];
+};
+
+/**
+ * struct brcmf_p2psd_gas_pub_act_frame - Wi-Fi GAS Public Action Frame
+ *
+ * @category: 0x04 Public Action Frame
+ * @action: 0x6c Advertisement Protocol
+ * @dialog_token: nonzero, identifies req/rsp transaction
+ * @query_data[1]: Query Data for SD GAS initial request/response.
+ */
+struct brcmf_p2psd_gas_pub_act_frame {
+ u8 category;
+ u8 action;
+ u8 dialog_token;
+ u8 query_data[1];
+};
+
+/**
+ * struct brcmf_config_af_params - Action Frame Parameters for tx.
+ *
+ * @mpc_onoff: to send the action frame successfully, mpc may need to be
+ * turned off; 0: off, 1: on, (-1): do nothing
+ * @search_channel: 1: search for the peer's channel before sending the af
+ * @extra_listen: keep dwelling to receive the af response frame.
+ */
+struct brcmf_config_af_params {
+ s32 mpc_onoff;
+ bool search_channel;
+ bool extra_listen;
+};
+
+/**
+ * brcmf_p2p_is_pub_action() - true if p2p public type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p public action type
+ */
+static bool brcmf_p2p_is_pub_action(void *frame, u32 frame_len)
+{
+ struct brcmf_p2p_pub_act_frame *pact_frm;
+
+ if (frame == NULL)
+ return false;
+
+ pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+ if (frame_len < sizeof(struct brcmf_p2p_pub_act_frame) - 1)
+ return false;
+
+ if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+ pact_frm->action == P2P_PUB_AF_ACTION &&
+ pact_frm->oui_type == P2P_VER &&
+ memcmp(pact_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0)
+ return true;
+
+ return false;
+}
+
+/**
+ * brcmf_p2p_is_p2p_action() - true if p2p action type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p action type
+ */
+static bool brcmf_p2p_is_p2p_action(void *frame, u32 frame_len)
+{
+ struct brcmf_p2p_action_frame *act_frm;
+
+ if (frame == NULL)
+ return false;
+
+ act_frm = (struct brcmf_p2p_action_frame *)frame;
+ if (frame_len < sizeof(struct brcmf_p2p_action_frame) - 1)
+ return false;
+
+ if (act_frm->category == P2P_AF_CATEGORY &&
+ act_frm->type == P2P_VER &&
+ memcmp(act_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0)
+ return true;
+
+ return false;
+}
+
+/**
+ * brcmf_p2p_is_gas_action() - true if p2p gas action type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p gas action type
+ */
+static bool brcmf_p2p_is_gas_action(void *frame, u32 frame_len)
+{
+ struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+
+ if (frame == NULL)
+ return false;
+
+ sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+ if (frame_len < sizeof(struct brcmf_p2psd_gas_pub_act_frame) - 1)
+ return false;
+
+ if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+ return false;
+
+ if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+ return true;
+
+ return false;
+}
+
+/**
+ * brcmf_p2p_print_actframe() - debug print routine.
+ *
+ * @tx: true if the frame is (to be) transmitted, false if received.
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Print information about the p2p action frame
+ */
+
+#ifdef DEBUG
+
+static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+ struct brcmf_p2p_pub_act_frame *pact_frm;
+ struct brcmf_p2p_action_frame *act_frm;
+ struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+
+ if (!frame || frame_len <= 2)
+ return;
+
+ if (brcmf_p2p_is_pub_action(frame, frame_len)) {
+ pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+ switch (pact_frm->subtype) {
+ case P2P_PAF_GON_REQ:
+ brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Req Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_GON_RSP:
+ brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Rsp Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_GON_CONF:
+ brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Confirm Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_INVITE_REQ:
+ brcmf_dbg(TRACE, "%s P2P Invitation Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_INVITE_RSP:
+ brcmf_dbg(TRACE, "%s P2P Invitation Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_DEVDIS_REQ:
+ brcmf_dbg(TRACE, "%s P2P Device Discoverability Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_DEVDIS_RSP:
+ brcmf_dbg(TRACE, "%s P2P Device Discoverability Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_PROVDIS_REQ:
+ brcmf_dbg(TRACE, "%s P2P Provision Discovery Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_PROVDIS_RSP:
+ brcmf_dbg(TRACE, "%s P2P Provision Discovery Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ default:
+ brcmf_dbg(TRACE, "%s Unknown P2P Public Action Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ }
+ } else if (brcmf_p2p_is_p2p_action(frame, frame_len)) {
+ act_frm = (struct brcmf_p2p_action_frame *)frame;
+ switch (act_frm->subtype) {
+ case P2P_AF_NOTICE_OF_ABSENCE:
+ brcmf_dbg(TRACE, "%s P2P Notice of Absence Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_AF_PRESENCE_REQ:
+ brcmf_dbg(TRACE, "%s P2P Presence Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_AF_PRESENCE_RSP:
+ brcmf_dbg(TRACE, "%s P2P Presence Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_AF_GO_DISC_REQ:
+ brcmf_dbg(TRACE, "%s P2P Discoverability Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ default:
+ brcmf_dbg(TRACE, "%s Unknown P2P Action Frame\n",
+ (tx) ? "TX" : "RX");
+ }
+
+ } else if (brcmf_p2p_is_gas_action(frame, frame_len)) {
+ sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+ switch (sd_act_frm->action) {
+ case P2PSD_ACTION_ID_GAS_IREQ:
+ brcmf_dbg(TRACE, "%s P2P GAS Initial Request\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2PSD_ACTION_ID_GAS_IRESP:
+ brcmf_dbg(TRACE, "%s P2P GAS Initial Response\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2PSD_ACTION_ID_GAS_CREQ:
+ brcmf_dbg(TRACE, "%s P2P GAS Comback Request\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2PSD_ACTION_ID_GAS_CRESP:
+ brcmf_dbg(TRACE, "%s P2P GAS Comback Response\n",
+ (tx) ? "TX" : "RX");
+ break;
+ default:
+ brcmf_dbg(TRACE, "%s Unknown P2P GAS Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ }
+ }
+}
+
+#else
+
+static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+}
+
+#endif
+
+
+/**
+ * brcmf_p2p_chnr_to_chspec() - convert channel number to chanspec.
+ *
+ * @channel: channel number
+ */
+static u16 brcmf_p2p_chnr_to_chspec(u16 channel)
+{
+ u16 chanspec;
+
+ chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+ if (channel <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ return chanspec;
+}
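+
+/* Encoding example for the helper above (exact bit values depend on the
+ * WL_CHANSPEC_* definitions): channel 6 yields
+ * 6 | WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE,
+ * while channel 36 gets WL_CHANSPEC_BAND_5G instead of the 2G band bits.
+ */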
+
+
+/**
+ * brcmf_p2p_set_firmware() - prepare firmware for peer-to-peer operation.
+ *
+ * @ifp: ifp to use for iovars (primary).
+ * @p2p_mac: mac address to configure for p2p_da_override
+ */
+static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
+{
+ s32 ret = 0;
+
+ brcmf_fil_iovar_int_set(ifp, "apsta", 1);
+
+ /* In case of a COB type device the firmware has a default mac
+ * address. After initializing the firmware we have to set the
+ * current mac address as the P2P device address.
+ */
+ ret = brcmf_fil_iovar_data_set(ifp, "p2p_da_override", p2p_mac,
+ ETH_ALEN);
+ if (ret)
+ brcmf_err("failed to update device address ret %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * brcmf_p2p_generate_bss_mac() - derive mac addresses for P2P.
+ *
+ * @p2p: P2P specific data.
+ *
+ * P2P needs mac addresses for P2P device and interface. These are
+ * derived from the primary net device, ie. the permanent ethernet
+ * address of the device.
+ */
+static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+ struct brcmf_if *p2p_ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+
+ /* Generate the P2P Device Address. This consists of the device's
+ * primary MAC address with the locally administered bit set.
+ */
+ memcpy(p2p->dev_addr, pri_ifp->mac_addr, ETH_ALEN);
+ p2p->dev_addr[0] |= 0x02;
+ memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
+
+ /* Generate the P2P Interface Address. If the discovery and connection
+ * BSSCFGs need to simultaneously co-exist, then this address must be
+ * different from the P2P Device Address, but also locally administered.
+ */
+ memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN);
+ p2p->int_addr[4] ^= 0x80;
+}
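+
+/* Worked example of the derivation above (addresses are made up): with a
+ * primary MAC of 00:90:4c:12:34:56 the P2P Device Address becomes
+ * 02:90:4c:12:34:56 (locally administered bit set) and the P2P Interface
+ * Address becomes 02:90:4c:12:b4:56 (octet 4 XOR 0x80).
+ */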
+
+/**
+ * brcmf_p2p_scan_is_p2p_request() - is cfg80211 scan request a P2P scan.
+ *
+ * @request: the scan request as received from cfg80211.
+ *
+ * returns true if one of the ssids in the request matches the
+ * P2P wildcard ssid; otherwise returns false.
+ */
+static bool brcmf_p2p_scan_is_p2p_request(struct cfg80211_scan_request *request)
+{
+ struct cfg80211_ssid *ssids = request->ssids;
+ int i;
+
+ for (i = 0; i < request->n_ssids; i++) {
+ if (ssids[i].ssid_len != BRCMF_P2P_WILDCARD_SSID_LEN)
+ continue;
+
+ brcmf_dbg(INFO, "comparing ssid \"%s\"", ssids[i].ssid);
+ if (!memcmp(BRCMF_P2P_WILDCARD_SSID, ssids[i].ssid,
+ BRCMF_P2P_WILDCARD_SSID_LEN))
+ return true;
+ }
+ return false;
+}
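+
+/* Note: the P2P wildcard ssid is the "DIRECT-" prefix defined by Wi-Fi
+ * Direct, so a p2p_find issued by wpa_supplicant typically carries one
+ * ssid entry of length BRCMF_P2P_WILDCARD_SSID_LEN matching that prefix.
+ */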
+
+/**
+ * brcmf_p2p_set_discover_state() - set discover state in firmware.
+ *
+ * @ifp: low-level interface object.
+ * @state: discover state to set.
+ * @chanspec: channel parameters (for state @WL_P2P_DISC_ST_LISTEN only).
+ * @listen_ms: duration to listen (for state @WL_P2P_DISC_ST_LISTEN only).
+ */
+static s32 brcmf_p2p_set_discover_state(struct brcmf_if *ifp, u8 state,
+ u16 chanspec, u16 listen_ms)
+{
+ struct brcmf_p2p_disc_st_le discover_state;
+ s32 ret = 0;
+ brcmf_dbg(TRACE, "enter\n");
+
+ discover_state.state = state;
+ discover_state.chspec = cpu_to_le16(chanspec);
+ discover_state.dwell = cpu_to_le16(listen_ms);
+ ret = brcmf_fil_bsscfg_data_set(ifp, "p2p_state", &discover_state,
+ sizeof(discover_state));
+ return ret;
+}
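+
+/* Usage sketch (illustrative; assumes a discovery vif and local chanspec/err
+ * variables): putting discovery into listen state on channel 11 for 200 ms
+ * would look like
+ *
+ *	chanspec = brcmf_p2p_chnr_to_chspec(11);
+ *	err = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_LISTEN,
+ *	                                   chanspec, 200);
+ *
+ * which is essentially what brcmf_p2p_discover_listen() does further down.
+ */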
+
+/**
+ * brcmf_p2p_deinit_discovery() - disable P2P device discovery.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Resets the discovery state and disables it in firmware.
+ */
+static s32 brcmf_p2p_deinit_discovery(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_cfg80211_vif *vif;
+
+ brcmf_dbg(TRACE, "enter\n");
+
+ /* Set the discovery state to SCAN */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ (void)brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+
+ /* Disable P2P discovery in the firmware */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+ (void)brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 0);
+
+ return 0;
+}
+
+/**
+ * brcmf_p2p_enable_discovery() - initialize and configure discovery.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Initializes the discovery device and configures the virtual interface.
+ */
+static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_cfg80211_vif *vif;
+ s32 ret = 0;
+
+ brcmf_dbg(TRACE, "enter\n");
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (!vif) {
+ brcmf_err("P2P config device not available\n");
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (test_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status)) {
+ brcmf_dbg(INFO, "P2P config device already configured\n");
+ goto exit;
+ }
+
+ /* Re-initialize P2P Discovery in the firmware */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+ ret = brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 1);
+ if (ret < 0) {
+ brcmf_err("set p2p_disc error\n");
+ goto exit;
+ }
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ ret = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+ if (ret < 0) {
+ brcmf_err("unable to set WL_P2P_DISC_ST_SCAN\n");
+ goto exit;
+ }
+
+ /*
+ * Set wsec to any non-zero value in the discovery bsscfg
+ * to ensure our P2P probe responses have the privacy bit
+ * set in the 802.11 WPA IE. Some peer devices may not
+ * initiate WPS with us if this bit is not set.
+ */
+ ret = brcmf_fil_bsscfg_int_set(vif->ifp, "wsec", AES_ENABLED);
+ if (ret < 0) {
+ brcmf_err("wsec error %d\n", ret);
+ goto exit;
+ }
+
+ set_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status);
+exit:
+ return ret;
+}
+
+/**
+ * brcmf_p2p_escan() - initiate a P2P scan.
+ *
+ * @p2p: P2P specific data.
+ * @num_chans: number of channels to scan.
+ * @chanspecs: channel parameters for @num_chans channels.
+ * @search_state: P2P discover state to use.
+ * @action: scan action to pass to firmware.
+ * @bss_type: type of P2P bss.
+ */
+static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
+ u16 chanspecs[], s32 search_state, u16 action,
+ enum p2p_bss_type bss_type)
+{
+ s32 ret = 0;
+ s32 memsize = offsetof(struct brcmf_p2p_scan_le,
+ eparams.params_le.channel_list);
+ s32 nprobes;
+ s32 active;
+ u32 i;
+ u8 *memblk;
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_p2p_scan_le *p2p_params;
+ struct brcmf_scan_params_le *sparams;
+ struct brcmf_ssid ssid;
+
+ memsize += num_chans * sizeof(__le16);
+ memblk = kzalloc(memsize, GFP_KERNEL);
+ if (!memblk)
+ return -ENOMEM;
+
+ vif = p2p->bss_idx[bss_type].vif;
+ if (vif == NULL) {
+ brcmf_err("no vif for bss type %d\n", bss_type);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ switch (search_state) {
+ case WL_P2P_DISC_ST_SEARCH:
+ /*
+ * If we are in SEARCH state, we don't need to set the SSID
+ * explicitly because the dongle uses the P2P wildcard SSID
+ * internally by default.
+ */
+ /* use null ssid */
+ ssid.SSID_len = 0;
+ memset(ssid.SSID, 0, sizeof(ssid.SSID));
+ break;
+ case WL_P2P_DISC_ST_SCAN:
+ /*
+ * wpa_supplicant issues the p2p_find command with type social
+ * or progressive. For progressive we need to set the ssid to
+ * the P2P wildcard, because otherwise only a broadcast scan
+ * would be done.
+ */
+ ssid.SSID_len = BRCMF_P2P_WILDCARD_SSID_LEN;
+ memcpy(ssid.SSID, BRCMF_P2P_WILDCARD_SSID, ssid.SSID_len);
+ break;
+ default:
+ brcmf_err(" invalid search state %d\n", search_state);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ brcmf_p2p_set_discover_state(vif->ifp, search_state, 0, 0);
+
+ /*
+ * set p2p scan parameters.
+ */
+ p2p_params = (struct brcmf_p2p_scan_le *)memblk;
+ p2p_params->type = 'E';
+
+ /* determine the scan engine parameters */
+ sparams = &p2p_params->eparams.params_le;
+ sparams->bss_type = DOT11_BSSTYPE_ANY;
+ if (p2p->cfg->active_scan)
+ sparams->scan_type = 0;
+ else
+ sparams->scan_type = 1;
+
+ memset(&sparams->bssid, 0xFF, ETH_ALEN);
+ if (ssid.SSID_len)
+ memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len);
+ sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
+ sparams->home_time = cpu_to_le32(P2PAPI_SCAN_HOME_TIME_MS);
+
+ /*
+ * SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan
+ * supported by the supplicant.
+ */
+ if (num_chans == SOCIAL_CHAN_CNT || num_chans == (SOCIAL_CHAN_CNT + 1))
+ active = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS;
+ else if (num_chans == AF_PEER_SEARCH_CNT)
+ active = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS;
+ else if (wl_get_vif_state_all(p2p->cfg, BRCMF_VIF_STATUS_CONNECTED))
+ active = -1;
+ else
+ active = P2PAPI_SCAN_DWELL_TIME_MS;
+
+ /* Override scan params to find a peer for a connection */
+ if (num_chans == 1) {
+ active = WL_SCAN_CONNECT_DWELL_TIME_MS;
+ /* WAR to sync with presence period of VSDB GO.
+ * send probe request more frequently
+ */
+ nprobes = active / WL_SCAN_JOIN_PROBE_INTERVAL_MS;
+ } else {
+ nprobes = active / P2PAPI_SCAN_NPROBS_TIME_MS;
+ }
+
+ if (nprobes <= 0)
+ nprobes = 1;
+
+ brcmf_dbg(INFO, "nprobes # %d, active_time %d\n", nprobes, active);
+ sparams->active_time = cpu_to_le32(active);
+ sparams->nprobes = cpu_to_le32(nprobes);
+ sparams->passive_time = cpu_to_le32(-1);
+ sparams->channel_num = cpu_to_le32(num_chans &
+ BRCMF_SCAN_PARAMS_COUNT_MASK);
+ for (i = 0; i < num_chans; i++)
+ sparams->channel_list[i] = cpu_to_le16(chanspecs[i]);
+
+ /* set the escan specific parameters */
+ p2p_params->eparams.version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+ p2p_params->eparams.action = cpu_to_le16(action);
+ p2p_params->eparams.sync_id = cpu_to_le16(0x1234);
+ /* perform p2p scan on primary device */
+ ret = brcmf_fil_bsscfg_data_set(vif->ifp, "p2p_scan", memblk, memsize);
+ if (!ret)
+ set_bit(BRCMF_SCAN_STATUS_BUSY, &p2p->cfg->scan_status);
+exit:
+ kfree(memblk);
+ return ret;
+}
+
+/**
+ * brcmf_p2p_run_escan() - escan callback for peer-to-peer.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @ndev: net device for which scan is requested.
+ * @request: scan request from cfg80211.
+ * @action: scan action.
+ *
+ * Determines the P2P discovery state based on scan request parameters and
+ * validates the channels in the request.
+ */
+static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ u16 action)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ s32 err = 0;
+ s32 search_state = WL_P2P_DISC_ST_SCAN;
+ struct brcmf_cfg80211_vif *vif;
+ struct net_device *dev = NULL;
+ int i, num_nodfs = 0;
+ u16 *chanspecs;
+
+ brcmf_dbg(TRACE, "enter\n");
+
+ if (!request) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (request->n_channels) {
+ chanspecs = kcalloc(request->n_channels, sizeof(*chanspecs),
+ GFP_KERNEL);
+ if (!chanspecs) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+ if (vif)
+ dev = vif->wdev.netdev;
+ if (request->n_channels == 3 &&
+ request->channels[0]->hw_value == SOCIAL_CHAN_1 &&
+ request->channels[1]->hw_value == SOCIAL_CHAN_2 &&
+ request->channels[2]->hw_value == SOCIAL_CHAN_3) {
+ /* SOCIAL CHANNELS 1, 6, 11 */
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
+ } else if (dev != NULL && vif->mode == WL_MODE_AP) {
+ /* If you are already a GO, then do SEARCH only */
+ brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ } else {
+ brcmf_dbg(INFO, "P2P SCAN STATE START\n");
+ }
+
+ /*
+ * no P2P scanning on passive or DFS channels.
+ */
+ for (i = 0; i < request->n_channels; i++) {
+ struct ieee80211_channel *chan = request->channels[i];
+
+ if (chan->flags & (IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_PASSIVE_SCAN))
+ continue;
+
+ chanspecs[i] = channel_to_chanspec(chan);
+ brcmf_dbg(INFO, "%d: chan=%d, channel spec=%x\n",
+ num_nodfs, chan->hw_value, chanspecs[i]);
+ num_nodfs++;
+ }
+ err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state,
+ action, P2PAPI_BSSCFG_DEVICE);
+ }
+exit:
+ if (err)
+ brcmf_err("error (%d)\n", err);
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_find_listen_channel() - find listen channel in ie string.
+ *
+ * @ie: string of information elements.
+ * @ie_len: length of string.
+ *
+ * Scan the ie for the p2p ie and look for attribute 6 (listen channel). If
+ * available determine the channel and return it.
+ */
+static s32 brcmf_p2p_find_listen_channel(const u8 *ie, u32 ie_len)
+{
+ u8 channel_ie[5];
+ s32 listen_channel;
+ s32 err;
+
+ err = cfg80211_get_p2p_attr(ie, ie_len,
+ IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+ channel_ie, sizeof(channel_ie));
+ if (err < 0)
+ return err;
+
+ /* listen channel subel length format: */
+ /* 3(country) + 1(op. class) + 1(chan num) */
+ listen_channel = (s32)channel_ie[3 + 1];
+
+ if (listen_channel == SOCIAL_CHAN_1 ||
+ listen_channel == SOCIAL_CHAN_2 ||
+ listen_channel == SOCIAL_CHAN_3) {
+ brcmf_dbg(INFO, "Found my Listen Channel %d\n", listen_channel);
+ return listen_channel;
+ }
+
+ return -EPERM;
+}
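+
+/* Attribute body layout consumed by the parsing above:
+ *
+ *	channel_ie[0..2]  country string (3 bytes)
+ *	channel_ie[3]     operating class
+ *	channel_ie[4]     channel number, e.g. 6 (SOCIAL_CHAN_2)
+ */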
+
+
+/**
+ * brcmf_p2p_scan_prep() - prepare scan based on request.
+ *
+ * @wiphy: wiphy device.
+ * @request: scan request from cfg80211.
+ * @vif: vif on which scan request is to be executed.
+ *
+ * Prepare the scan appropriately for the type of scan requested. Overrides the
+ * escan .run() callback for peer-to-peer scanning.
+ */
+int brcmf_p2p_scan_prep(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request,
+ struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ int err = 0;
+
+ if (brcmf_p2p_scan_is_p2p_request(request)) {
+ /* find my listen channel */
+ err = brcmf_p2p_find_listen_channel(request->ie,
+ request->ie_len);
+ if (err < 0)
+ return err;
+
+ p2p->afx_hdl.my_listen_chan = err;
+
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n");
+
+ err = brcmf_p2p_enable_discovery(p2p);
+ if (err)
+ return err;
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+
+ /* override .run_escan() callback. */
+ cfg->escan_info.run = brcmf_p2p_run_escan;
+ }
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG,
+ request->ie, request->ie_len);
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_discover_listen() - set firmware to discover listen state.
+ *
+ * @p2p: p2p device.
+ * @channel: channel nr for discover listen.
+ * @duration: time in ms to stay on channel.
+ *
+ */
+static s32
+brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration)
+{
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ u16 chanspec;
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (!vif) {
+ brcmf_err("Discovery is not set, so we have nothing to do\n");
+ err = -EPERM;
+ goto exit;
+ }
+
+ if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status)) {
+ brcmf_err("Previous LISTEN is not completed yet\n");
+ /* WAR: prevent cookie mismatch in wpa_supplicant; return OK */
+ goto exit;
+ }
+
+ chanspec = brcmf_p2p_chnr_to_chspec(channel);
+ err = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_LISTEN,
+ chanspec, (u16)duration);
+ if (!err) {
+ set_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status);
+ p2p->remain_on_channel_cookie++;
+ }
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_remain_on_channel() - put device on channel and stay there.
+ *
+ * @wiphy: wiphy device.
+ * @channel: channel to stay on.
+ * @duration: time in ms to remain on channel.
+ *
+ */
+int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *channel,
+ unsigned int duration, u64 *cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ s32 err;
+ u16 channel_nr;
+
+ channel_nr = ieee80211_frequency_to_channel(channel->center_freq);
+ brcmf_dbg(TRACE, "Enter, channel: %d, duration ms (%d)\n", channel_nr,
+ duration);
+
+ err = brcmf_p2p_enable_discovery(p2p);
+ if (err)
+ goto exit;
+ err = brcmf_p2p_discover_listen(p2p, channel_nr, duration);
+ if (err)
+ goto exit;
+
+ memcpy(&p2p->remain_on_channel, channel, sizeof(*channel));
+ *cookie = p2p->remain_on_channel_cookie;
+ cfg80211_ready_on_channel(wdev, *cookie, channel, duration, GFP_KERNEL);
+
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_notify_listen_complete() - p2p listen has completed.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: payload of message. Not used.
+ *
+ */
+int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+
+ brcmf_dbg(TRACE, "Enter\n");
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN,
+ &p2p->status)) {
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status)) {
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status);
+ brcmf_dbg(INFO, "Listen DONE, wake up wait_next_af\n");
+ complete(&p2p->wait_next_af);
+ }
+
+ cfg80211_remain_on_channel_expired(&ifp->vif->wdev,
+ p2p->remain_on_channel_cookie,
+ &p2p->remain_on_channel,
+ GFP_KERNEL);
+ }
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_cancel_remain_on_channel() - cancel p2p listen state.
+ *
+ * @ifp: interface control.
+ *
+ */
+void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp)
+{
+ if (!ifp)
+ return;
+ brcmf_p2p_set_discover_state(ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+ brcmf_p2p_notify_listen_complete(ifp, NULL, NULL);
+}
+
+
+/**
+ * brcmf_p2p_act_frm_search() - search function for action frame.
+ *
+ * @p2p: p2p device.
+ * @channel: channel on which the action frame is to be transmitted.
+ *
+ * Search function to reach a common channel for sending the action frame.
+ * When channel is 0 all social channels will be used to send the af.
+ */
+static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel)
+{
+ s32 err;
+ u32 channel_cnt;
+ u16 *default_chan_list;
+ u32 i;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (channel)
+ channel_cnt = AF_PEER_SEARCH_CNT;
+ else
+ channel_cnt = SOCIAL_CHAN_CNT;
+ default_chan_list = kzalloc(channel_cnt * sizeof(*default_chan_list),
+ GFP_KERNEL);
+ if (default_chan_list == NULL) {
+ brcmf_err("channel list allocation failed\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ if (channel) {
+ /* insert same channel to the chan_list */
+ for (i = 0; i < channel_cnt; i++)
+ default_chan_list[i] =
+ brcmf_p2p_chnr_to_chspec(channel);
+ } else {
+ default_chan_list[0] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_1);
+ default_chan_list[1] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_2);
+ default_chan_list[2] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_3);
+ }
+ err = brcmf_p2p_escan(p2p, channel_cnt, default_chan_list,
+ WL_P2P_DISC_ST_SEARCH, WL_ESCAN_ACTION_START,
+ P2PAPI_BSSCFG_DEVICE);
+ kfree(default_chan_list);
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_afx_handler() - afx worker thread.
+ *
+ * @work: afx_work member of struct afx_hdl.
+ *
+ */
+static void brcmf_p2p_afx_handler(struct work_struct *work)
+{
+ struct afx_hdl *afx_hdl = container_of(work, struct afx_hdl, afx_work);
+ struct brcmf_p2p_info *p2p = container_of(afx_hdl,
+ struct brcmf_p2p_info,
+ afx_hdl);
+ s32 err;
+
+ if (!afx_hdl->is_active)
+ return;
+
+ if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
+ /* 100ms ~ 300ms */
+ err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
+ 100 * (1 + (random32() % 3)));
+ else
+ err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
+
+ if (err) {
+ brcmf_err("ERROR occurred! value is (%d)\n", err);
+ if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status))
+ complete(&afx_hdl->act_frm_scan);
+ }
+}
+
+
+/**
+ * brcmf_p2p_af_searching_channel() - search channel.
+ *
+ * @p2p: p2p device info struct.
+ *
+ */
+static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
+{
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ struct brcmf_cfg80211_vif *pri_vif;
+ unsigned long duration;
+ s32 retry;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ pri_vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+
+ INIT_COMPLETION(afx_hdl->act_frm_scan);
+ set_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
+ afx_hdl->is_active = true;
+ afx_hdl->peer_chan = P2P_INVALID_CHANNEL;
+
+ /* Loop to wait until we find a peer's channel or the
+ * pending action frame tx is cancelled.
+ */
+ retry = 0;
+ duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
+ while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
+ (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
+ afx_hdl->is_listen = false;
+ brcmf_dbg(TRACE, "Scheduling action frame for sending.. (%d)\n",
+ retry);
+ /* search peer on peer's listen channel */
+ schedule_work(&afx_hdl->afx_work);
+ wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
+ if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+ (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status)))
+ break;
+
+ if (afx_hdl->my_listen_chan) {
+ brcmf_dbg(TRACE, "Scheduling listen peer, channel=%d\n",
+ afx_hdl->my_listen_chan);
+ /* listen on my listen channel */
+ afx_hdl->is_listen = true;
+ schedule_work(&afx_hdl->afx_work);
+ wait_for_completion_timeout(&afx_hdl->act_frm_scan,
+ duration);
+ }
+ if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+ (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status)))
+ break;
+ retry++;
+
+ /* if sta is connected or connecting, sleep for a while before
+ * retry af tx or finding a peer
+ */
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &pri_vif->sme_state) ||
+ test_bit(BRCMF_VIF_STATUS_CONNECTING, &pri_vif->sme_state))
+ msleep(P2P_DEFAULT_SLEEP_TIME_VSDB);
+ }
+
+ brcmf_dbg(TRACE, "Completed search/listen peer_chan=%d\n",
+ afx_hdl->peer_chan);
+ afx_hdl->is_active = false;
+
+ clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
+
+ return afx_hdl->peer_chan;
+}
+
+
+/**
+ * brcmf_p2p_scan_finding_common_channel() - check if escan is for finding a common channel.
+ *
+ * @cfg: common configuration struct.
+ * @bi: bss info struct, result from scan.
+ *
+ */
+bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_bss_info_le *bi)
+
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ u8 *ie;
+ s32 err;
+ u8 p2p_dev_addr[ETH_ALEN];
+
+ if (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status))
+ return false;
+
+ if (bi == NULL) {
+ brcmf_dbg(TRACE, "ACTION FRAME SCAN Done\n");
+ if (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)
+ complete(&afx_hdl->act_frm_scan);
+ return true;
+ }
+
+ ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
+ memset(p2p_dev_addr, 0, sizeof(p2p_dev_addr));
+ err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length),
+ IEEE80211_P2P_ATTR_DEVICE_INFO,
+ p2p_dev_addr, sizeof(p2p_dev_addr));
+ if (err < 0)
+ err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length),
+ IEEE80211_P2P_ATTR_DEVICE_ID,
+ p2p_dev_addr, sizeof(p2p_dev_addr));
+ if ((err >= 0) &&
+ (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) {
+ afx_hdl->peer_chan = bi->ctl_ch ? bi->ctl_ch :
+ CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
+ brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n",
+ afx_hdl->tx_dst_addr, afx_hdl->peer_chan);
+ complete(&afx_hdl->act_frm_scan);
+ }
+ return true;
+}
+
+/**
+ * brcmf_p2p_stop_wait_next_action_frame() - finish scan if af tx complete.
+ *
+ * @cfg: common configuration struct.
+ *
+ */
+static void
+brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct net_device *ndev = cfg->escan_info.ndev;
+
+ if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
+ (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
+ test_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status))) {
+ brcmf_dbg(TRACE, "*** Wake UP ** abort actframe iovar\n");
+ /* if channel is not zero, "actframe" uses off channel scan.
+ * So abort scan for off channel completion.
+ */
+ if (p2p->af_sent_channel)
+ brcmf_notify_escan_complete(cfg, ndev, true, true);
+ } else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status)) {
+ brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n");
+ /* So abort scan to cancel listen */
+ brcmf_notify_escan_complete(cfg, ndev, true, true);
+ }
+}
+
+
+/**
+ * brcmf_p2p_gon_req_collision() - check for GO negotiation collision.
+ *
+ * @p2p: p2p device info struct.
+ * @mac: source address of the received GO negotiation request.
+ *
+ * Return true if the received action frame is to be dropped.
+ */
+static bool
+brcmf_p2p_gon_req_collision(struct brcmf_p2p_info *p2p, u8 *mac)
+{
+ struct brcmf_cfg80211_info *cfg = p2p->cfg;
+ struct brcmf_if *ifp;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (!test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) ||
+ !p2p->gon_req_action)
+ return false;
+
+ brcmf_dbg(TRACE, "GO Negotiation Request COLLISION !!!\n");
+ /* if the sa (peer) addr is less than the da (my) addr, this device
+ * processes the peer's gon request and blocks sending its own gon req.
+ * otherwise (sa addr > da addr), this device continues with its own
+ * gon request and drops the gon req of the peer.
+ */
+ ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+ if (memcmp(mac, ifp->mac_addr, ETH_ALEN) < 0) {
+ brcmf_dbg(INFO, "Block transmit gon req !!!\n");
+ p2p->block_gon_req_tx = true;
+ /* if we are finding a common channel for sending af,
+ * do not scan more to block to send current gon req
+ */
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status))
+ complete(&p2p->afx_hdl.act_frm_scan);
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status))
+ brcmf_p2p_stop_wait_next_action_frame(cfg);
+ return false;
+ }
+
+ /* drop gon request of peer to process gon request by this device. */
+ brcmf_dbg(INFO, "Drop received gon req !!!\n");
+
+ return true;
+}
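+
+/* Tie-break example for the rule above (made-up addresses): with peer
+ * sa = 02:11:22:33:44:55 and our da = 02:11:22:33:44:66, memcmp(sa, da)
+ * is negative, so we block our own gon request tx and handle the peer's
+ * request; with the addresses swapped, the peer's gon request is dropped
+ * and our own request proceeds.
+ */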
+
+
+/**
+ * brcmf_p2p_notify_action_frame_rx() - received action frame.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: payload of message, containing action frame data.
+ *
+ */
+int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ struct wireless_dev *wdev;
+ u32 mgmt_frame_len = e->datalen - sizeof(struct brcmf_rx_mgmt_data);
+ struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+ u8 *frame = (u8 *)(rxframe + 1);
+ struct brcmf_p2p_pub_act_frame *act_frm;
+ struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+ u16 chanspec = be16_to_cpu(rxframe->chanspec);
+ struct ieee80211_mgmt *mgmt_frame;
+ s32 freq;
+ u16 mgmt_type;
+ u8 action;
+
+ /* Check if wpa_supplicant has registered for this frame */
+ brcmf_dbg(INFO, "ifp->vif->mgmt_rx_reg %04x\n", ifp->vif->mgmt_rx_reg);
+ mgmt_type = (IEEE80211_STYPE_ACTION & IEEE80211_FCTL_STYPE) >> 4;
+ if ((ifp->vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+ return 0;
+
+ brcmf_p2p_print_actframe(false, frame, mgmt_frame_len);
+
+ action = P2P_PAF_SUBTYPE_INVALID;
+ if (brcmf_p2p_is_pub_action(frame, mgmt_frame_len)) {
+ act_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+ action = act_frm->subtype;
+ if ((action == P2P_PAF_GON_REQ) &&
+ (brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) {
+ if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status) &&
+ (memcmp(afx_hdl->tx_dst_addr, e->addr,
+ ETH_ALEN) == 0)) {
+ afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec);
+ brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
+ afx_hdl->peer_chan);
+ complete(&afx_hdl->act_frm_scan);
+ }
+ return 0;
+ }
+ /* After GO Negotiation completes, turn mpc back on */
+ if ((action == P2P_PAF_GON_CONF) ||
+ (action == P2P_PAF_PROVDIS_RSP))
+ brcmf_set_mpc(ifp->ndev, 1);
+ if (action == P2P_PAF_GON_CONF) {
+ brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ }
+ } else if (brcmf_p2p_is_gas_action(frame, mgmt_frame_len)) {
+ sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+ action = sd_act_frm->action;
+ }
+
+ if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+ (p2p->next_af_subtype == action)) {
+ brcmf_dbg(TRACE, "We got a right next frame! (%d)\n", action);
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status);
+ /* Stop waiting for next AF. */
+ brcmf_p2p_stop_wait_next_action_frame(cfg);
+ }
+
+ mgmt_frame = kzalloc(offsetof(struct ieee80211_mgmt, u) +
+ mgmt_frame_len, GFP_KERNEL);
+ if (!mgmt_frame) {
+ brcmf_err("No memory available for action frame\n");
+ return -ENOMEM;
+ }
+ memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN);
+ brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mgmt_frame->bssid,
+ ETH_ALEN);
+ memcpy(mgmt_frame->sa, e->addr, ETH_ALEN);
+ mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
+ memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
+ mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
+
+ freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec),
+ CHSPEC_IS2G(chanspec) ?
+ IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ);
+ wdev = ifp->ndev->ieee80211_ptr;
+ cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len,
+ GFP_ATOMIC);
+
+ kfree(mgmt_frame);
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_notify_action_tx_complete() - transmit action frame complete
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: not used.
+ *
+ */
+int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+
+ brcmf_dbg(INFO, "Enter: event %s, status=%d\n",
+ e->event_code == BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE ?
+ "ACTION_FRAME_OFF_CHAN_COMPLETE" : "ACTION_FRAME_COMPLETE",
+ e->status);
+
+ if (!test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status))
+ return 0;
+
+ if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
+ if (e->status == BRCMF_E_STATUS_SUCCESS)
+ set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+ &p2p->status);
+ else {
+ set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+ /* If there is no ack, we don't need to wait for
+ * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
+ */
+ brcmf_p2p_stop_wait_next_action_frame(cfg);
+ }
+
+ } else {
+ complete(&p2p->send_af_done);
+ }
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_tx_action_frame() - send action frame over fil.
+ *
+ * @p2p: p2p info struct for vif.
+ * @af_params: action frame data/info.
+ *
+ * Send an action frame immediately without doing channel synchronization.
+ *
+ * This function waits for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ */
+static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
+ struct brcmf_fil_af_params_le *af_params)
+{
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ s32 timeout = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ INIT_COMPLETION(p2p->send_af_done);
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
+ sizeof(*af_params));
+ if (err) {
+ brcmf_err(" sending action frame has failed\n");
+ goto exit;
+ }
+
+ p2p->af_sent_channel = le32_to_cpu(af_params->channel);
+ p2p->af_tx_sent_jiffies = jiffies;
+
+ timeout = wait_for_completion_timeout(&p2p->send_af_done,
+ msecs_to_jiffies(P2P_AF_MAX_WAIT_TIME));
+
+ if (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status)) {
+ brcmf_dbg(TRACE, "TX action frame operation is success\n");
+ } else {
+ err = -EIO;
+ brcmf_dbg(TRACE, "TX action frame operation has failed\n");
+ }
+ /* clear status bit for action tx */
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_pub_af_tx() - public action frame tx routine.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @af_params: action frame data/info.
+ * @config_af_params: configuration data for action frame.
+ *
+ * Routine which transmits a public action frame.
+ */
+static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_fil_af_params_le *af_params,
+ struct brcmf_config_af_params *config_af_params)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_fil_action_frame_le *action_frame;
+ struct brcmf_p2p_pub_act_frame *act_frm;
+ s32 err = 0;
+ u16 ie_len;
+
+ action_frame = &af_params->action_frame;
+ act_frm = (struct brcmf_p2p_pub_act_frame *)(action_frame->data);
+
+ config_af_params->extra_listen = true;
+
+ switch (act_frm->subtype) {
+ case P2P_PAF_GON_REQ:
+ brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status set\n");
+ set_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ config_af_params->mpc_onoff = 0;
+ config_af_params->search_channel = true;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ p2p->gon_req_action = true;
+ /* increase dwell time to wait for RESP frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_GON_RSP:
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* increase dwell time to wait for CONF frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_GON_CONF:
+ /* If we reached GO Neg confirmation, reset the filter */
+ brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ /* turn on mpc again if go nego is done */
+ config_af_params->mpc_onoff = 1;
+ /* minimize dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ case P2P_PAF_INVITE_REQ:
+ config_af_params->search_channel = true;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* increase dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_INVITE_RSP:
+ /* minimize dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ case P2P_PAF_DEVDIS_REQ:
+ config_af_params->search_channel = true;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* maximize dwell time to wait for RESP frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_LONG_DWELL_TIME);
+ break;
+ case P2P_PAF_DEVDIS_RSP:
+ /* minimize dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ case P2P_PAF_PROVDIS_REQ:
+ ie_len = le16_to_cpu(action_frame->len) -
+ offsetof(struct brcmf_p2p_pub_act_frame, elts);
+ if (cfg80211_get_p2p_attr(&act_frm->elts[0], ie_len,
+ IEEE80211_P2P_ATTR_GROUP_ID,
+ NULL, 0) < 0)
+ config_af_params->search_channel = true;
+ config_af_params->mpc_onoff = 0;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* increase dwell time to wait for RESP frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_PROVDIS_RSP:
+ /* wpa_supplicant sends the go neg req right after prov disc */
+ p2p->next_af_subtype = P2P_PAF_GON_REQ;
+ /* increase dwell time to MED level */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ default:
+ brcmf_err("Unknown p2p pub act frame subtype: %d\n",
+ act_frm->subtype);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+/**
+ * brcmf_p2p_send_action_frame() - send action frame.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @ndev: net device to transmit on.
+ * @af_params: configuration data for action frame.
+ */
+bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct brcmf_fil_af_params_le *af_params)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_fil_action_frame_le *action_frame;
+ struct brcmf_config_af_params config_af_params;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ u16 action_frame_len;
+ bool ack = false;
+ u8 category;
+ u8 action;
+ s32 tx_retry;
+ s32 extra_listen_time;
+ uint delta_ms;
+
+ action_frame = &af_params->action_frame;
+ action_frame_len = le16_to_cpu(action_frame->len);
+
+ brcmf_p2p_print_actframe(true, action_frame->data, action_frame_len);
+
+ /* Add the default dwell time. Dwell time to stay off-channel */
+ /* to wait for a response action frame after transmitting a */
+ /* GO Negotiation action frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_DWELL_TIME);
+
+ category = action_frame->data[DOT11_ACTION_CAT_OFF];
+ action = action_frame->data[DOT11_ACTION_ACT_OFF];
+
+ /* initialize variables */
+ p2p->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+ p2p->gon_req_action = false;
+
+ /* config parameters */
+ config_af_params.mpc_onoff = -1;
+ config_af_params.search_channel = false;
+ config_af_params.extra_listen = false;
+
+ if (brcmf_p2p_is_pub_action(action_frame->data, action_frame_len)) {
+ /* p2p public action frame process */
+ if (brcmf_p2p_pub_af_tx(cfg, af_params, &config_af_params)) {
+ /* Just send unknown subtype frame with */
+ /* default parameters. */
+ brcmf_err("P2P Public action frame, unknown subtype.\n");
+ }
+ } else if (brcmf_p2p_is_gas_action(action_frame->data,
+ action_frame_len)) {
+ /* service discovery process */
+ if (action == P2PSD_ACTION_ID_GAS_IREQ ||
+ action == P2PSD_ACTION_ID_GAS_CREQ) {
+ /* configure service discovery query frame */
+ config_af_params.search_channel = true;
+
+ /* save next af subtype to cancel */
+ /* remaining dwell time */
+ p2p->next_af_subtype = action + 1;
+
+ af_params->dwell_time =
+ cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ } else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
+ action == P2PSD_ACTION_ID_GAS_CRESP) {
+ /* configure service discovery response frame */
+ af_params->dwell_time =
+ cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ } else {
+ brcmf_err("Unknown action type: %d\n", action);
+ goto exit;
+ }
+ } else if (brcmf_p2p_is_p2p_action(action_frame->data,
+ action_frame_len)) {
+ /* do not configure anything. it will be */
+ /* sent with a default configuration */
+ } else {
+ brcmf_err("Unknown Frame: category 0x%x, action 0x%x\n",
+ category, action);
+ return false;
+ }
+
+ /* if connecting on primary iface, sleep for a while before sending
+ * af tx for VSDB
+ */
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->sme_state))
+ msleep(50);
+
+ /* if scan is ongoing, abort current scan. */
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+ brcmf_abort_scanning(cfg);
+
+ memcpy(afx_hdl->tx_dst_addr, action_frame->da, ETH_ALEN);
+
+ /* To make sure the action frame is sent successfully, turn off mpc */
+ if (config_af_params.mpc_onoff == 0)
+ brcmf_set_mpc(ndev, 0);
+
+ /* set status and destination address before sending af */
+ if (p2p->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+ /* set status to cancel the remaining dwell time in rx process */
+ set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+ }
+
+ p2p->af_sent_channel = 0;
+ set_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+ /* validate channel and p2p ies */
+ if (config_af_params.search_channel &&
+ IS_P2P_SOCIAL_CHANNEL(le32_to_cpu(af_params->channel)) &&
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->saved_ie.probe_req_ie_len) {
+ afx_hdl = &p2p->afx_hdl;
+ afx_hdl->peer_listen_chan = le32_to_cpu(af_params->channel);
+
+ if (brcmf_p2p_af_searching_channel(p2p) ==
+ P2P_INVALID_CHANNEL) {
+ brcmf_err("Couldn't find peer's channel.\n");
+ goto exit;
+ }
+
+ /* Abort scan even for VSDB scenarios. Scan gets aborted in
+ * firmware but after the check of the piggyback algorithm. To take
+ * care of the current piggyback algo, let's abort the scan here
+ * itself.
+ */
+ brcmf_notify_escan_complete(cfg, ndev, true, true);
+
+ /* update channel */
+ af_params->channel = cpu_to_le32(afx_hdl->peer_chan);
+ }
+
+ tx_retry = 0;
+ while (!p2p->block_gon_req_tx &&
+ (ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) {
+ ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
+ tx_retry++;
+ }
+ if (ack == false) {
+ brcmf_err("Failed to send Action Frame(retry %d)\n", tx_retry);
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ }
+
+exit:
+ clear_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+
+ /* WAR: sometimes dongle does not keep the dwell time of 'actframe'.
+ * if we couldn't get the next action response frame and dongle does
+ * not keep the dwell time, go to listen state again to get next action
+ * response frame.
+ */
+ if (ack && config_af_params.extra_listen && !p2p->block_gon_req_tx &&
+ test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+ p2p->af_sent_channel == afx_hdl->my_listen_chan) {
+ delta_ms = jiffies_to_msecs(jiffies - p2p->af_tx_sent_jiffies);
+ if (le32_to_cpu(af_params->dwell_time) > delta_ms)
+ extra_listen_time = le32_to_cpu(af_params->dwell_time) -
+ delta_ms;
+ else
+ extra_listen_time = 0;
+ if (extra_listen_time > 50) {
+ set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status);
+ brcmf_dbg(INFO, "Wait more time! actual af time:%d, calculated extra listen:%d\n",
+ le32_to_cpu(af_params->dwell_time),
+ extra_listen_time);
+ extra_listen_time += 100;
+ if (!brcmf_p2p_discover_listen(p2p,
+ p2p->af_sent_channel,
+ extra_listen_time)) {
+ unsigned long duration;
+
+ extra_listen_time += 100;
+ duration = msecs_to_jiffies(extra_listen_time);
+ wait_for_completion_timeout(&p2p->wait_next_af,
+ duration);
+ }
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status);
+ }
+ }
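+
+ /* Worked numbers for the extra listen above (sketch): with dwell_time
+ * 200 ms and an af tx that took delta_ms = 120 ms, extra_listen_time
+ * starts at 80 ms, passes the 50 ms threshold, and the two 100 ms
+ * paddings give a 180 ms listen and a 280 ms completion wait.
+ */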
+
+ if (p2p->block_gon_req_tx) {
+ /* if ack is true, the supplicant will wait more time (100ms),
+ * so we return success to get more time.
+ */
+ p2p->block_gon_req_tx = false;
+ ack = true;
+ }
+
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+ /* if all done, turn mpc on again */
+ if (config_af_params.mpc_onoff == 1)
+ brcmf_set_mpc(ndev, 1);
+
+ return ack;
+}
+
+/**
+ * brcmf_p2p_notify_rx_mgmt_p2p_probereq() - Event handler for p2p probe req.
+ *
+ * @ifp: interface pointer for which event was received.
+ * @e: event message.
+ * @data: payload of event message (probe request).
+ */
+s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ struct wireless_dev *wdev;
+ struct brcmf_cfg80211_vif *vif = ifp->vif;
+ struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+ u16 chanspec = be16_to_cpu(rxframe->chanspec);
+ u8 *mgmt_frame;
+ u32 mgmt_frame_len;
+ s32 freq;
+ u16 mgmt_type;
+
+ brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code,
+ e->reason);
+
+ if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
+ (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) {
+ afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec);
+ brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
+ afx_hdl->peer_chan);
+ complete(&afx_hdl->act_frm_scan);
+ }
+
+ /* Firmware sends us two probe responses for each idx one. At the */
+ /* moment anything but bsscfgidx 0 is passed up to supplicant */
+ if (e->bsscfgidx == 0)
+ return 0;
+
+ /* Filter any P2P probe reqs arriving during the GO-NEG Phase */
+ if (test_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status)) {
+ brcmf_dbg(INFO, "Filtering P2P probe_req in GO-NEG phase\n");
+ return 0;
+ }
+
+ /* Check if wpa_supplicant has registered for this frame */
+ brcmf_dbg(INFO, "vif->mgmt_rx_reg %04x\n", vif->mgmt_rx_reg);
+ mgmt_type = (IEEE80211_STYPE_PROBE_REQ & IEEE80211_FCTL_STYPE) >> 4;
+ if ((vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+ return 0;
+
+ mgmt_frame = (u8 *)(rxframe + 1);
+ mgmt_frame_len = e->datalen - sizeof(*rxframe);
+ freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec),
+ CHSPEC_IS2G(chanspec) ?
+ IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ);
+ wdev = ifp->ndev->ieee80211_ptr;
+ cfg80211_rx_mgmt(wdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+
+ brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
+ mgmt_frame_len, e->datalen, chanspec, freq);
+
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_attach() - attach for P2P.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ */
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_if *pri_ifp;
+ struct brcmf_if *p2p_ifp;
+ struct brcmf_cfg80211_vif *p2p_vif;
+ struct brcmf_p2p_info *p2p;
+ struct brcmf_pub *drvr;
+ s32 bssidx;
+ s32 err = 0;
+
+ p2p = &cfg->p2p;
+ p2p->cfg = cfg;
+
+ drvr = cfg->pub;
+
+ pri_ifp = drvr->iflist[0];
+ p2p_ifp = drvr->iflist[1];
+
+ p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
+
+ if (p2p_ifp) {
+ p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
+ false);
+ if (IS_ERR(p2p_vif)) {
+ brcmf_err("could not create discovery vif\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ p2p_vif->ifp = p2p_ifp;
+ p2p_ifp->vif = p2p_vif;
+ p2p_vif->wdev.netdev = p2p_ifp->ndev;
+ p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev;
+ SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy));
+
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
+
+ brcmf_p2p_generate_bss_mac(p2p);
+ brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
+
+ /* Initialize P2P Discovery in the firmware */
+ err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
+ if (err < 0) {
+ brcmf_err("set p2p_disc error\n");
+ brcmf_free_vif(p2p_vif);
+ goto exit;
+ }
+ /* obtain bsscfg index for P2P discovery */
+ err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
+ if (err < 0) {
+ brcmf_err("retrieving discover bsscfg index failed\n");
+ brcmf_free_vif(p2p_vif);
+ goto exit;
+ }
+ /* Verify that firmware uses same bssidx as driver !! */
+ if (p2p_ifp->bssidx != bssidx) {
+ brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
+ bssidx, p2p_ifp->bssidx);
+ brcmf_free_vif(p2p_vif);
+ goto exit;
+ }
+
+ init_completion(&p2p->send_af_done);
+ INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
+ init_completion(&p2p->afx_hdl.act_frm_scan);
+ init_completion(&p2p->wait_next_af);
+ }
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_detach() - detach P2P.
+ *
+ * @p2p: P2P specific data.
+ */
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_cfg80211_vif *vif;
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (vif != NULL) {
+ brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+ brcmf_p2p_deinit_discovery(p2p);
+ /* remove discovery interface */
+ brcmf_free_vif(vif);
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+ }
+ /* just set it all to zero */
+ memset(p2p, 0, sizeof(*p2p));
+}
+
+/**
+ * brcmf_p2p_get_current_chanspec() - Get current operation channel.
+ *
+ * @p2p: P2P specific data.
+ * @chanspec: chanspec to be returned.
+ */
+static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
+ u16 *chanspec)
+{
+ struct brcmf_if *ifp;
+ struct brcmf_fil_chan_info_le ci;
+ s32 err;
+
+ ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+
+ *chanspec = 11 & WL_CHANSPEC_CHAN_MASK;
+
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci));
+ if (!err) {
+ *chanspec = le32_to_cpu(ci.hw_channel) & WL_CHANSPEC_CHAN_MASK;
+ if (*chanspec < CH_MAX_2G_CHANNEL)
+ *chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ *chanspec |= WL_CHANSPEC_BAND_5G;
+ }
+ *chanspec |= WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+}
+
+/**
+ * brcmf_p2p_ifchange() - change a P2P role.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @if_type: interface type to change the P2P bss to.
+ *
+ * Returns 0 on success.
+ */
+int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
+ enum brcmf_fil_p2p_if_types if_type)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_fil_p2p_if_le if_request;
+ s32 err;
+ u16 chanspec;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+ if (!vif) {
+ brcmf_err("vif for P2PAPI_BSSCFG_PRIMARY does not exist\n");
+ return -EPERM;
+ }
+ brcmf_notify_escan_complete(cfg, vif->ifp->ndev, true, true);
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+ if (!vif) {
+ brcmf_err("vif for P2PAPI_BSSCFG_CONNECTION does not exist\n");
+ return -EPERM;
+ }
+ brcmf_set_mpc(vif->ifp->ndev, 0);
+
+ /* In the concurrency case, the STA may already be associated on a */
+ /* particular channel, so retrieve the current channel of the primary */
+ /* interface and then start the virtual interface on that. */
+ brcmf_p2p_get_current_chanspec(p2p, &chanspec);
+
+ if_request.type = cpu_to_le16((u16)if_type);
+ if_request.chspec = cpu_to_le16(chanspec);
+ memcpy(if_request.addr, p2p->int_addr, sizeof(if_request.addr));
+
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+ err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request,
+ sizeof(if_request));
+ if (err) {
+ brcmf_err("p2p_ifupd FAILED, err=%d\n", err);
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ return err;
+ }
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_CHANGE,
+ msecs_to_jiffies(1500));
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ if (!err) {
+ brcmf_err("No BRCMF_E_IF_CHANGE event received\n");
+ return -EIO;
+ }
+
+ err = brcmf_fil_cmd_int_set(vif->ifp, BRCMF_C_SET_SCB_TIMEOUT,
+ BRCMF_SCB_TIMEOUT_VALUE);
+
+ return err;
+}
+
+static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p,
+ struct brcmf_if *ifp, u8 ea[ETH_ALEN],
+ enum brcmf_fil_p2p_if_types iftype)
+{
+ struct brcmf_fil_p2p_if_le if_request;
+ int err;
+ u16 chanspec;
+
+ /* we need a default channel */
+ brcmf_p2p_get_current_chanspec(p2p, &chanspec);
+
+ /* fill the firmware request */
+ memcpy(if_request.addr, ea, ETH_ALEN);
+ if_request.type = cpu_to_le16((u16)iftype);
+ if_request.chspec = cpu_to_le16(chanspec);
+
+ err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request,
+ sizeof(if_request));
+ return err;
+}
+
+static int brcmf_p2p_disable_p2p_if(struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
+ struct net_device *pri_ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(pri_ndev);
+ u8 *addr = vif->wdev.netdev->dev_addr;
+
+ return brcmf_fil_iovar_data_set(ifp, "p2p_ifdis", addr, ETH_ALEN);
+}
+
+static int brcmf_p2p_release_p2p_if(struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
+ struct net_device *pri_ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(pri_ndev);
+ u8 *addr = vif->wdev.netdev->dev_addr;
+
+ return brcmf_fil_iovar_data_set(ifp, "p2p_ifdel", addr, ETH_ALEN);
+}
+
+/**
+ * brcmf_p2p_add_vif() - create a new P2P virtual interface.
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ * @type: nl80211 interface type.
+ * @flags: TBD
+ * @params: TBD
+ */
+struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_cfg80211_vif *vif;
+ enum brcmf_fil_p2p_if_types iftype;
+ enum wl_mode mode;
+ int err;
+
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ return ERR_PTR(-EBUSY);
+
+ brcmf_dbg(INFO, "adding vif \"%s\" (type=%d)\n", name, type);
+
+ switch (type) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ iftype = BRCMF_FIL_P2P_IF_CLIENT;
+ mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ iftype = BRCMF_FIL_P2P_IF_GO;
+ mode = WL_MODE_AP;
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ vif = brcmf_alloc_vif(cfg, type, false);
+ if (IS_ERR(vif))
+ return (struct wireless_dev *)vif;
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+
+ err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp, cfg->p2p.int_addr,
+ iftype);
+ if (err) {
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ goto fail;
+ }
+
+ /* wait for firmware event */
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
+ msecs_to_jiffies(1500));
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ if (!err) {
+ brcmf_err("timeout occurred\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ /* interface created in firmware */
+ ifp = vif->ifp;
+ if (!ifp) {
+ brcmf_err("no if pointer provided\n");
+ err = -ENOENT;
+ goto fail;
+ }
+
+ strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+ err = brcmf_net_attach(ifp, true);
+ if (err) {
+ brcmf_err("Registering netdevice failed\n");
+ goto fail;
+ }
+ cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
+ /* Disable firmware roaming for P2P interface */
+ brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+ if (iftype == BRCMF_FIL_P2P_IF_GO) {
+ /* set station timeout for p2p */
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCB_TIMEOUT,
+ BRCMF_SCB_TIMEOUT_VALUE);
+ }
+ return &ifp->vif->wdev;
+
+fail:
+ brcmf_free_vif(vif);
+ return ERR_PTR(err);
+}
+
+/**
+ * brcmf_p2p_del_vif() - delete a P2P virtual interface.
+ *
+ * @wiphy: wiphy device of interface.
+ * @wdev: wireless device of interface.
+ *
+ * TODO: not yet supported.
+ */
+int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_cfg80211_vif *vif;
+ unsigned long jiffie_timeout = msecs_to_jiffies(1500);
+ bool wait_for_disable = false;
+ int err;
+
+ brcmf_dbg(TRACE, "delete P2P vif\n");
+ vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
+ switch (vif->wdev.iftype) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
+ wait_for_disable = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_GO:
+ if (!brcmf_p2p_disable_p2p_if(vif))
+ wait_for_disable = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_DEVICE:
+ default:
+ return -ENOTSUPP;
+ break;
+ }
+
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n");
+
+ if (wait_for_disable)
+ wait_for_completion_timeout(&cfg->vif_disabled,
+ msecs_to_jiffies(500));
+
+ brcmf_vif_clear_mgmt_ies(vif);
+
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+ err = brcmf_p2p_release_p2p_if(vif);
+ if (!err) {
+ /* wait for firmware event */
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL,
+ jiffie_timeout);
+ if (!err)
+ err = -EIO;
+ else
+ err = 0;
+ }
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ brcmf_free_vif(vif);
+ p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
+
+ return err;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
new file mode 100644
index 00000000000..6821b26224b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef WL_CFGP2P_H_
+#define WL_CFGP2P_H_
+
+#include <net/cfg80211.h>
+
+struct brcmf_cfg80211_info;
+
+/**
+ * enum p2p_bss_type - different type of BSS configurations.
+ *
+ * @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg.
+ * @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION: maps to driver's P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_MAX: used for range checking.
+ */
+enum p2p_bss_type {
+ P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+ P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+ P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_MAX
+};
+
+/**
+ * struct p2p_bss - peer-to-peer bss related information.
+ *
+ * @vif: virtual interface of this P2P bss.
+ * @private_data: TBD
+ */
+struct p2p_bss {
+ struct brcmf_cfg80211_vif *vif;
+ void *private_data;
+};
+
+/**
+ * enum brcmf_p2p_status - P2P specific dongle status.
+ *
+ * @BRCMF_P2P_STATUS_IF_ADD: peer-to-peer vif add sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_DEL: NOT-USED?
+ * @BRCMF_P2P_STATUS_IF_DELETING: peer-to-peer vif delete sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_CHANGING: peer-to-peer vif change sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_CHANGED: peer-to-peer vif change completed on dongle.
+ * @BRCMF_P2P_STATUS_ACTION_TX_COMPLETED: action frame tx completed.
+ * @BRCMF_P2P_STATUS_ACTION_TX_NOACK: action frame tx not acked.
+ * @BRCMF_P2P_STATUS_GO_NEG_PHASE: P2P GO negotiation ongoing.
+ * @BRCMF_P2P_STATUS_DISCOVER_LISTEN: P2P listen, remaining on channel.
+ * @BRCMF_P2P_STATUS_SENDING_ACT_FRAME: In the process of sending action frame.
+ * @BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN: extra listen time for af tx.
+ * @BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME: waiting for action frame response.
+ * @BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL: search channel for AF active.
+ */
+enum brcmf_p2p_status {
+ BRCMF_P2P_STATUS_ENABLED,
+ BRCMF_P2P_STATUS_IF_ADD,
+ BRCMF_P2P_STATUS_IF_DEL,
+ BRCMF_P2P_STATUS_IF_DELETING,
+ BRCMF_P2P_STATUS_IF_CHANGING,
+ BRCMF_P2P_STATUS_IF_CHANGED,
+ BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+ BRCMF_P2P_STATUS_ACTION_TX_NOACK,
+ BRCMF_P2P_STATUS_GO_NEG_PHASE,
+ BRCMF_P2P_STATUS_DISCOVER_LISTEN,
+ BRCMF_P2P_STATUS_SENDING_ACT_FRAME,
+ BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL
+};
+
+/**
+ * struct afx_hdl - action frame off channel storage.
+ *
+ * @afx_work: worker thread for searching channel
+ * @act_frm_scan: thread synchronizing struct.
+ * @is_active: channel searching active.
+ * @peer_chan: current channel.
+ * @is_listen: sets mode for afx worker.
+ * @my_listen_chan: this peer's own listen channel.
+ * @peer_listen_chan: the remote peer's listen channel.
+ * @tx_dst_addr: mac address where tx af should be sent to.
+ */
+struct afx_hdl {
+ struct work_struct afx_work;
+ struct completion act_frm_scan;
+ bool is_active;
+ s32 peer_chan;
+ bool is_listen;
+ u16 my_listen_chan;
+ u16 peer_listen_chan;
+ u8 tx_dst_addr[ETH_ALEN];
+};
+
+/**
+ * struct brcmf_p2p_info - p2p specific driver information.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @status: status of P2P (see enum brcmf_p2p_status).
+ * @dev_addr: P2P device address.
+ * @int_addr: P2P interface address.
+ * @bss_idx: information for the different P2P bss types.
+ * @listen_timer: timer for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @ssid: ssid for P2P GO.
+ * @listen_channel: channel for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @remain_on_channel: contains copy of struct used by cfg80211.
+ * @remain_on_channel_cookie: cookie counter for remain on channel cmd
+ * @next_af_subtype: expected action frame subtype.
+ * @send_af_done: indication that action frame tx is complete.
+ * @afx_hdl: action frame search handler info.
+ * @af_sent_channel: channel action frame is sent.
+ * @af_tx_sent_jiffies: jiffies time when af tx was transmitted.
+ * @wait_next_af: thread synchronizing struct.
+ * @gon_req_action: about to send GO negotiation request frame.
+ * @block_gon_req_tx: drop tx GO negotiation request frame.
+ */
+struct brcmf_p2p_info {
+ struct brcmf_cfg80211_info *cfg;
+ unsigned long status;
+ u8 dev_addr[ETH_ALEN];
+ u8 int_addr[ETH_ALEN];
+ struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
+ struct timer_list listen_timer;
+ struct brcmf_ssid ssid;
+ u8 listen_channel;
+ struct ieee80211_channel remain_on_channel;
+ u32 remain_on_channel_cookie;
+ u8 next_af_subtype;
+ struct completion send_af_done;
+ struct afx_hdl afx_hdl;
+ u32 af_sent_channel;
+ unsigned long af_tx_sent_jiffies;
+ struct completion wait_next_af;
+ bool gon_req_action;
+ bool block_gon_req_tx;
+};
+
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg);
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
+struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params);
+int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
+int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
+ enum brcmf_fil_p2p_if_types if_type);
+int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+int brcmf_p2p_scan_prep(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request,
+ struct brcmf_cfg80211_vif *vif);
+int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *channel,
+ unsigned int duration, u64 *cookie);
+int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp);
+int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct brcmf_fil_af_params_le *af_params);
+bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_bss_info_le *bi);
+s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+#endif /* WL_CFGP2P_H_ */
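
The brcmf_p2p_status values declared above are bit numbers, not masks: they are used with set_bit()/clear_bit()/test_bit() on the unsigned long status member of struct brcmf_p2p_info, as in the GO_NEG_PHASE handling in brcmf_p2p_del_vif(). A small standalone illustration of that pattern, using local stand-ins rather than the kernel bitops:

#include <stdio.h>

enum demo_status { DEMO_ENABLED, DEMO_IF_ADD, DEMO_GO_NEG_PHASE };

static void demo_set_bit(int nr, unsigned long *w)   { *w |=  (1UL << nr); }
static void demo_clear_bit(int nr, unsigned long *w) { *w &= ~(1UL << nr); }
static int  demo_test_bit(int nr, unsigned long *w)  { return !!(*w & (1UL << nr)); }

int main(void)
{
	unsigned long status = 0;

	demo_set_bit(DEMO_GO_NEG_PHASE, &status);
	printf("in GO negotiation: %d\n", demo_test_bit(DEMO_GO_NEG_PHASE, &status));
	demo_clear_bit(DEMO_GO_NEG_PHASE, &status);
	printf("after clear: %d\n", demo_test_bit(DEMO_GO_NEG_PHASE, &status));
	return 0;
}

Keeping the enum as bit indices lets one unsigned long word track every P2P state flag at once.
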
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index e15630cc388..42289e9ea88 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -354,11 +354,10 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize)
int i;
struct brcmf_usbreq *req, *reqs;
- reqs = kzalloc(sizeof(struct brcmf_usbreq) * qsize, GFP_ATOMIC);
- if (reqs == NULL) {
- brcmf_err("fail to allocate memory!\n");
+ reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC);
+ if (reqs == NULL)
return NULL;
- }
+
req = reqs;
for (i = 0; i < qsize; i++) {
@@ -421,10 +420,6 @@ static void brcmf_usb_tx_complete(struct urb *urb)
brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
req->skb);
brcmf_usb_del_fromq(devinfo, req);
- if (urb->status == 0)
- devinfo->bus_pub.bus->dstats.tx_packets++;
- else
- devinfo->bus_pub.bus->dstats.tx_errors++;
brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
@@ -451,10 +446,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
req->skb = NULL;
/* zero lenght packets indicate usb "failure". Do not refill */
- if (urb->status == 0 && urb->actual_length) {
- devinfo->bus_pub.bus->dstats.rx_packets++;
- } else {
- devinfo->bus_pub.bus->dstats.rx_errors++;
+ if (urb->status != 0 || !urb->actual_length) {
brcmu_pkt_buf_free_skb(skb);
brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
return;
@@ -1257,6 +1249,8 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->bus_priv.usb = bus_pub;
dev_set_drvdata(dev, bus);
bus->ops = &brcmf_usb_bus_ops;
+ bus->chip = bus_pub->devid;
+ bus->chiprev = bus_pub->chiprev;
/* Attach to the common driver interface */
ret = brcmf_attach(0, dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 62a528e8b95..cecc3eff72e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -26,6 +26,8 @@
#include <brcmu_wifi.h>
#include "dhd.h"
#include "dhd_dbg.h"
+#include "fwil_types.h"
+#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
@@ -41,16 +43,13 @@
#define BRCMF_PNO_SCAN_COMPLETE 1
#define BRCMF_PNO_SCAN_INCOMPLETE 0
-#define BRCMF_IFACE_MAX_CNT 2
+#define BRCMF_IFACE_MAX_CNT 3
-#define TLV_LEN_OFF 1 /* length offset */
-#define TLV_HDR_LEN 2 /* header length */
-#define TLV_BODY_OFF 2 /* body offset */
-#define TLV_OUI_LEN 3 /* oui id length */
#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
#define WPA_OUI_TYPE 1
#define RSN_OUI "\x00\x0F\xAC" /* RSN OUI */
#define WME_OUI_TYPE 2
+#define WPS_OUI_TYPE 4
#define VS_IE_FIXED_HDR_LEN 6
#define WPA_IE_VERSION_LEN 2
@@ -76,13 +75,15 @@
#define VNDR_IE_PKTFLAG_OFFSET 8
#define VNDR_IE_VSIE_OFFSET 12
#define VNDR_IE_HDR_SIZE 12
-#define VNDR_IE_BEACON_FLAG 0x1
-#define VNDR_IE_PRBRSP_FLAG 0x2
-#define MAX_VNDR_IE_NUMBER 5
+#define VNDR_IE_PARSE_LIMIT 5
#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */
#define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */
+#define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
+#define BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400
+#define BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS 20
+
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
@@ -271,13 +272,6 @@ static const u32 __wl_cipher_suites[] = {
WLAN_CIPHER_SUITE_AES_CMAC,
};
-/* tag_ID/length/value_buffer tuple */
-struct brcmf_tlv {
- u8 id;
- u8 len;
- u8 data[1];
-};
-
/* Vendor specific ie. id = 221, oui and type defines exact ie */
struct brcmf_vs_tlv {
u8 id;
@@ -294,7 +288,7 @@ struct parsed_vndr_ie_info {
struct parsed_vndr_ies {
u32 count;
- struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+ struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
};
/* Quarter dBm units to mW
@@ -381,7 +375,7 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
return qdbm;
}
-static u16 channel_to_chanspec(struct ieee80211_channel *ch)
+u16 channel_to_chanspec(struct ieee80211_channel *ch)
{
u16 chanspec;
@@ -393,19 +387,92 @@ static u16 channel_to_chanspec(struct ieee80211_channel *ch)
else
chanspec |= WL_CHANSPEC_BAND_5G;
- if (ch->flags & IEEE80211_CHAN_NO_HT40) {
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
- } else {
- chanspec |= WL_CHANSPEC_BW_40;
- if (ch->flags & IEEE80211_CHAN_NO_HT40PLUS)
- chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
- else
- chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
- }
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
return chanspec;
}
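
With the HT40 branch removed above, channel_to_chanspec() now always encodes a 20 MHz, no-sideband chanspec: channel number in the low byte, then sideband, bandwidth and band fields. The sketch below reproduces that composition with locally defined constants assumed to match brcmu_wifi.h of this era; treat the numeric values as illustrative only.

#include <stdio.h>
#include <stdint.h>

#define DEMO_CHAN_MASK   0x00ff	/* assumed WL_CHANSPEC_* layout */
#define DEMO_SB_NONE     0x0300
#define DEMO_BW_20       0x0800
#define DEMO_BAND_2G     0x2000
#define DEMO_BAND_5G     0x1000
#define DEMO_MAX_2G_CHAN 14

static uint16_t demo_chan_to_chanspec(int ch)
{
	uint16_t cs = ch & DEMO_CHAN_MASK;

	cs |= (ch <= DEMO_MAX_2G_CHAN) ? DEMO_BAND_2G : DEMO_BAND_5G;
	cs |= DEMO_BW_20 | DEMO_SB_NONE;	/* 20 MHz, no control sideband */
	return cs;
}

int main(void)
{
	printf("ch 6  -> 0x%04x\n", demo_chan_to_chanspec(6));
	printf("ch 36 -> 0x%04x\n", demo_chan_to_chanspec(36));
	return 0;
}
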
+/* Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
+{
+ struct brcmf_tlv *elt;
+ int totlen;
+
+ elt = (struct brcmf_tlv *)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
+ return elt;
+
+ elt = (struct brcmf_tlv *)((u8 *)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+
+ return NULL;
+}
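
brcmf_parse_tlvs() is a plain id/length/value walk with a bounds check before an element is returned. A self-contained example of the same walk over a hand-built IE buffer, using local types rather than the driver's struct brcmf_tlv:

#include <stdio.h>
#include <stdint.h>

struct demo_tlv {
	uint8_t id;
	uint8_t len;
	/* 'len' value bytes follow in the buffer */
};
#define DEMO_HDR_LEN 2

static struct demo_tlv *demo_find(uint8_t *buf, int buflen, uint8_t key)
{
	while (buflen >= DEMO_HDR_LEN) {
		struct demo_tlv *elt = (struct demo_tlv *)buf;
		int len = elt->len;

		if (elt->id == key && buflen >= len + DEMO_HDR_LEN)
			return elt;
		buf += len + DEMO_HDR_LEN;
		buflen -= len + DEMO_HDR_LEN;
	}
	return NULL;
}

int main(void)
{
	/* SSID(0)="ab", then RSN(48) with two body bytes */
	uint8_t ies[] = { 0, 2, 'a', 'b', 48, 2, 0x01, 0x00 };
	struct demo_tlv *rsn = demo_find(ies, sizeof(ies), 48);

	printf("RSN found=%d len=%d\n", rsn != NULL, rsn ? rsn->len : -1);
	return 0;
}
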
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ */
+static bool
+brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
+ u8 *oui, u32 oui_len, u8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
+ return true;
+ }
+
+ if (tlvs == NULL)
+ return false;
+ /* point to the next ie */
+ ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (int)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
+
+ return false;
+}
+
+static struct brcmf_vs_tlv *
+brcmf_find_wpaie(u8 *parse, u32 len)
+{
+ struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+ if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
+ return (struct brcmf_vs_tlv *)ie;
+ }
+ return NULL;
+}
+
+static struct brcmf_vs_tlv *
+brcmf_find_wpsie(u8 *parse, u32 len)
+{
+ struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+ if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ WPA_OUI, TLV_OUI_LEN, WPS_OUI_TYPE))
+ return (struct brcmf_vs_tlv *)ie;
+ }
+ return NULL;
+}
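
brcmf_find_wpaie() and brcmf_find_wpsie() differ only in the vendor-specific type byte passed to brcmf_tlv_has_ie(): both match the 00:50:F2 OUI, type 1 for WPA and type 4 for WPS, per the defines earlier in this file. A tiny standalone check of that OUI-plus-type test on a hand-built vendor IE:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int demo_is_vendor(const uint8_t *ie, const uint8_t *oui, uint8_t type)
{
	/* ie[0]=id(221), ie[1]=len, ie[2..4]=OUI, ie[5]=OUI type */
	return ie[1] >= 4 && !memcmp(&ie[2], oui, 3) && ie[5] == type;
}

int main(void)
{
	const uint8_t oui[3] = { 0x00, 0x50, 0xF2 };
	const uint8_t wps_ie[] = { 221, 5, 0x00, 0x50, 0xF2, 4, 0x10 };

	printf("is WPS: %d, is WPA: %d\n",
	       demo_is_vendor(wps_ie, oui, 4), demo_is_vendor(wps_ie, oui, 1));
	return 0;
}
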
+
+
static void convert_key_from_CPU(struct brcmf_wsec_key *key,
struct brcmf_wsec_key_le *key_le)
{
@@ -438,11 +505,153 @@ send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key)
return err;
}
+static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
+ const char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
+{
+ brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ return ERR_PTR(-EOPNOTSUPP);
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return brcmf_p2p_add_vif(wiphy, name, type, flags, params);
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+void brcmf_set_mpc(struct net_device *ndev, int mpc)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ s32 err = 0;
+
+ if (check_vif_up(ifp->vif)) {
+ err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
+ if (err) {
+ brcmf_err("fail to set mpc\n");
+ return;
+ }
+ brcmf_dbg(INFO, "MPC : %d\n", mpc);
+ }
+}
+
+s32
+brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ bool aborted, bool fw_abort)
+{
+ struct brcmf_scan_params_le params_le;
+ struct cfg80211_scan_request *scan_request;
+ s32 err = 0;
+
+ brcmf_dbg(SCAN, "Enter\n");
+
+ /* clear scan request, because the FW abort can cause a second call */
+ /* to this function and might cause a double cfg80211_scan_done */
+ scan_request = cfg->scan_request;
+ cfg->scan_request = NULL;
+
+ if (timer_pending(&cfg->escan_timeout))
+ del_timer_sync(&cfg->escan_timeout);
+
+ if (fw_abort) {
+ /* Do a scan abort to stop the driver's scan engine */
+ brcmf_dbg(SCAN, "ABORT scan in firmware\n");
+ memset(&params_le, 0, sizeof(params_le));
+ memset(params_le.bssid, 0xFF, ETH_ALEN);
+ params_le.bss_type = DOT11_BSSTYPE_ANY;
+ params_le.scan_type = 0;
+ params_le.channel_num = cpu_to_le32(1);
+ params_le.nprobes = cpu_to_le32(1);
+ params_le.active_time = cpu_to_le32(-1);
+ params_le.passive_time = cpu_to_le32(-1);
+ params_le.home_time = cpu_to_le32(-1);
+ /* Scan is aborted by setting channel_list[0] to -1 */
+ params_le.channel_list[0] = cpu_to_le16(-1);
+ /* E-Scan (or any other type) can be aborted by SCAN */
+ err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
+ &params_le, sizeof(params_le));
+ if (err)
+ brcmf_err("Scan abort failed\n");
+ }
+ /*
+ * e-scan can be initiated by scheduled scan
+ * which takes precedence.
+ */
+ if (cfg->sched_escan) {
+ brcmf_dbg(SCAN, "scheduled scan completed\n");
+ cfg->sched_escan = false;
+ if (!aborted)
+ cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+ brcmf_set_mpc(ndev, 1);
+ } else if (scan_request) {
+ brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
+ aborted ? "Aborted" : "Done");
+ cfg80211_scan_done(scan_request, aborted);
+ brcmf_set_mpc(ndev, 1);
+ }
+ if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+ brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
+
+ return err;
+}
+
+static
+int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = wdev->netdev;
+
+ /* vif event pending in firmware */
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ return -EBUSY;
+
+ if (ndev) {
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status) &&
+ cfg->escan_info.ndev == ndev)
+ brcmf_notify_escan_complete(cfg, ndev, true,
+ true);
+
+ brcmf_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1);
+ }
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ return -EOPNOTSUPP;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return brcmf_p2p_del_vif(wiphy, wdev);
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ default:
+ return -EINVAL;
+ }
+ return -EOPNOTSUPP;
+}
+
static s32
brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_vif *vif = ifp->vif;
s32 infra = 0;
@@ -462,10 +671,23 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
infra = 0;
break;
case NL80211_IFTYPE_STATION:
+ /* Ignore change for p2p IF. Unclear why supplicant does this */
+ if ((vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ||
+ (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) {
+ brcmf_dbg(TRACE, "Ignoring cmd for p2p if\n");
+ /* WAR: It is unexpected to get a change of VIF for P2P
+ * IF, but it happens. The request cannot be handled,
+ * yet returning EPERM causes a crash. Returning 0
+ * without setting ieee80211_ptr->iftype triggers a
+ * WARN_ON trace, but it works with wpa_supplicant.
+ */
+ return 0;
+ }
vif->mode = WL_MODE_BSS;
infra = 1;
break;
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
vif->mode = WL_MODE_AP;
ap = 1;
break;
@@ -475,8 +697,14 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
}
if (ap) {
- set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
- brcmf_dbg(INFO, "IF Type = AP\n");
+ if (type == NL80211_IFTYPE_P2P_GO) {
+ brcmf_dbg(INFO, "IF Type = P2P GO\n");
+ err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO);
+ }
+ if (!err) {
+ set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
+ brcmf_dbg(INFO, "IF Type = AP\n");
+ }
} else {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, infra);
if (err) {
@@ -495,21 +723,6 @@ done:
return err;
}
-static void brcmf_set_mpc(struct net_device *ndev, int mpc)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
- s32 err = 0;
-
- if (check_vif_up(ifp->vif)) {
- err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
- if (err) {
- brcmf_err("fail to set mpc\n");
- return;
- }
- brcmf_dbg(INFO, "MPC : %d\n", mpc);
- }
-}
-
static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
struct cfg80211_scan_request *request)
{
@@ -590,69 +803,6 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
}
static s32
-brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
- bool aborted, bool fw_abort)
-{
- struct brcmf_scan_params_le params_le;
- struct cfg80211_scan_request *scan_request;
- s32 err = 0;
-
- brcmf_dbg(SCAN, "Enter\n");
-
- /* clear scan request, because the FW abort can cause a second call */
- /* to this functon and might cause a double cfg80211_scan_done */
- scan_request = cfg->scan_request;
- cfg->scan_request = NULL;
-
- if (timer_pending(&cfg->escan_timeout))
- del_timer_sync(&cfg->escan_timeout);
-
- if (fw_abort) {
- /* Do a scan abort to stop the driver's scan engine */
- brcmf_dbg(SCAN, "ABORT scan in firmware\n");
- memset(&params_le, 0, sizeof(params_le));
- memset(params_le.bssid, 0xFF, ETH_ALEN);
- params_le.bss_type = DOT11_BSSTYPE_ANY;
- params_le.scan_type = 0;
- params_le.channel_num = cpu_to_le32(1);
- params_le.nprobes = cpu_to_le32(1);
- params_le.active_time = cpu_to_le32(-1);
- params_le.passive_time = cpu_to_le32(-1);
- params_le.home_time = cpu_to_le32(-1);
- /* Scan is aborted by setting channel_list[0] to -1 */
- params_le.channel_list[0] = cpu_to_le16(-1);
- /* E-Scan (or anyother type) can be aborted by SCAN */
- err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
- &params_le, sizeof(params_le));
- if (err)
- brcmf_err("Scan abort failed\n");
- }
- /*
- * e-scan can be initiated by scheduled scan
- * which takes precedence.
- */
- if (cfg->sched_escan) {
- brcmf_dbg(SCAN, "scheduled scan completed\n");
- cfg->sched_escan = false;
- if (!aborted)
- cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
- brcmf_set_mpc(ndev, 1);
- } else if (scan_request) {
- brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
- aborted ? "Aborted" : "Done");
- cfg80211_scan_done(scan_request, aborted);
- brcmf_set_mpc(ndev, 1);
- }
- if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
- brcmf_err("Scan complete while device not scanning\n");
- return -EPERM;
- }
-
- return err;
-}
-
-static s32
brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
struct cfg80211_scan_request *request, u16 action)
{
@@ -703,11 +853,12 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
s32 err;
u32 passive_scan;
struct brcmf_scan_results *results;
+ struct escan_info *escan = &cfg->escan_info;
brcmf_dbg(SCAN, "Enter\n");
- cfg->escan_info.ndev = ndev;
- cfg->escan_info.wiphy = wiphy;
- cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
+ escan->ndev = ndev;
+ escan->wiphy = wiphy;
+ escan->escan_state = WL_ESCAN_STATE_SCANNING;
passive_scan = cfg->active_scan ? 0 : 1;
err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN,
passive_scan);
@@ -721,7 +872,7 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
results->count = 0;
results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
- err = brcmf_run_escan(cfg, ndev, request, WL_ESCAN_ACTION_START);
+ err = escan->run(cfg, ndev, request, WL_ESCAN_ACTION_START);
if (err)
brcmf_set_mpc(ndev, 1);
return err;
@@ -758,6 +909,12 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
return -EAGAIN;
}
+ /* If scan req comes for p2p0, send it over primary I/F */
+ if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) {
+ ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+ ndev = ifp->ndev;
+ }
+
/* Arm scan timeout timer */
mod_timer(&cfg->escan_timeout, jiffies +
WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
@@ -776,6 +933,11 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
cfg->scan_request = request;
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
if (escan_req) {
+ cfg->escan_info.run = brcmf_run_escan;
+ err = brcmf_p2p_scan_prep(wiphy, request, ifp->vif);
+ if (err)
+ goto scan_out;
+
err = brcmf_do_escan(cfg, wiphy, ndev, request);
if (err)
goto scan_out;
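
This hunk parks brcmf_run_escan in cfg->escan_info.run before the scan is started, and the earlier hunk makes brcmf_do_escan() call escan->run() instead of brcmf_run_escan() directly; the indirection lets the P2P code substitute its own scan engine in the same slot. A minimal sketch of that callback-slot pattern, with illustrative names only:

#include <stdio.h>

struct demo_escan {
	int (*run)(const char *what);
};

static int demo_run_normal(const char *what) { printf("normal scan: %s\n", what); return 0; }
static int demo_run_p2p(const char *what)    { printf("p2p scan: %s\n", what);    return 0; }

static int demo_do_escan(struct demo_escan *escan, const char *what)
{
	return escan->run(what);	/* the caller decided which engine runs */
}

int main(void)
{
	struct demo_escan escan = { .run = demo_run_normal };

	demo_do_escan(&escan, "cfg80211 request");
	escan.run = demo_run_p2p;
	demo_do_escan(&escan, "af common-channel search");
	return 0;
}
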
@@ -933,31 +1095,6 @@ static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
memset(prof, 0, sizeof(*prof));
}
-static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
- size_t *join_params_size)
-{
- u16 chanspec = 0;
-
- if (ch != 0) {
- if (ch <= CH_MAX_2G_CHANNEL)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
-
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
-
- *join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE +
- sizeof(u16);
-
- chanspec |= (ch & WL_CHANSPEC_CHAN_MASK);
- join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec);
- join_params->params_le.chanspec_num = cpu_to_le32(1);
-
- brcmf_dbg(CONN, "channel %d, chanspec %#X\n", ch, chanspec);
- }
-}
-
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
{
s32 err = 0;
@@ -988,6 +1125,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
s32 wsec = 0;
s32 bcnprd;
+ u16 chanspec;
brcmf_dbg(TRACE, "Enter\n");
if (!check_vif_up(ifp->vif))
@@ -1091,8 +1229,11 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
params->chandef.chan->center_freq);
if (params->channel_fixed) {
/* adding chanspec */
- brcmf_ch_to_chanspec(cfg->channel,
- &join_params, &join_params_size);
+ chanspec = channel_to_chanspec(params->chandef.chan);
+ join_params.params_le.chanspec_list[0] =
+ cpu_to_le16(chanspec);
+ join_params.params_le.chanspec_num = cpu_to_le32(1);
+ join_params_size += sizeof(join_params.params_le);
}
/* set channel for starter */
@@ -1155,7 +1296,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
else
val = WPA_AUTH_DISABLED;
brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wpa_auth", val);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
if (err) {
brcmf_err("set wpa_auth failed (%d)\n", err);
return err;
@@ -1194,7 +1335,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
break;
}
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "auth", val);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
if (err) {
brcmf_err("set auth failed (%d)\n", err);
return err;
@@ -1258,7 +1399,12 @@ brcmf_set_set_cipher(struct net_device *ndev,
}
brcmf_dbg(CONN, "pval (%d) gval (%d)\n", pval, gval);
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wsec", pval | gval);
+ /* If privacy is requested but no ciphers are set and a WPS IE */
+ /* is present, simulate setting AES; WPS 2.0 allows no security */
+ if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
+ sme->privacy)
+ pval = AES_ENABLED;
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval);
if (err) {
brcmf_err("error (%d)\n", err);
return err;
@@ -1280,8 +1426,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
s32 err = 0;
if (sme->crypto.n_akm_suites) {
- err = brcmf_fil_iovar_int_get(netdev_priv(ndev),
- "wpa_auth", &val);
+ err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev),
+ "wpa_auth", &val);
if (err) {
brcmf_err("could not get wpa_auth (%d)\n", err);
return err;
@@ -1315,8 +1461,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
}
brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev),
- "wpa_auth", val);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev),
+ "wpa_auth", val);
if (err) {
brcmf_err("could not set wpa_auth (%d)\n", err);
return err;
@@ -1393,9 +1539,28 @@ brcmf_set_sharedkey(struct net_device *ndev,
return err;
}
+static
+enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp,
+ enum nl80211_auth_type type)
+{
+ u32 ci;
+ if (type == NL80211_AUTHTYPE_AUTOMATIC) {
+ /* shift to ignore chip revision */
+ ci = brcmf_get_chip_info(ifp) >> 4;
+ switch (ci) {
+ case 43236:
+ brcmf_dbg(CONN, "43236 WAR: use OPEN instead of AUTO\n");
+ return NL80211_AUTHTYPE_OPEN_SYSTEM;
+ default:
+ break;
+ }
+ }
+ return type;
+}
+
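
brcmf_war_auth_type() shifts the chip info right by four bits before comparing against 43236, i.e. it discards what is presumably a revision nibble packed into the low bits of the value returned by brcmf_get_chip_info(). The snippet below only illustrates that arithmetic under this packing assumption:

#include <stdio.h>

int main(void)
{
	unsigned int ci = (43236u << 4) | 2;	/* hypothetical chip 43236, rev 2 */

	printf("raw=0x%x chip=%u\n", ci, ci >> 4);	/* prints chip=43236 */
	return 0;
}
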
static s32
brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_connect_params *sme)
+ struct cfg80211_connect_params *sme)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -1403,7 +1568,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
- struct brcmf_ssid ssid;
+ struct brcmf_tlv *rsn_ie;
+ struct brcmf_vs_tlv *wpa_ie;
+ void *ie;
+ u32 ie_len;
+ struct brcmf_ext_join_params_le *ext_join_params;
+ u16 chanspec;
s32 err = 0;
@@ -1416,15 +1586,46 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
return -EOPNOTSUPP;
}
+ if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) {
+ /* A normal (non P2P) connection request setup. */
+ ie = NULL;
+ ie_len = 0;
+ /* find the WPA_IE */
+ wpa_ie = brcmf_find_wpaie((u8 *)sme->ie, sme->ie_len);
+ if (wpa_ie) {
+ ie = wpa_ie;
+ ie_len = wpa_ie->len + TLV_HDR_LEN;
+ } else {
+ /* find the RSN_IE */
+ rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+ WLAN_EID_RSN);
+ if (rsn_ie) {
+ ie = rsn_ie;
+ ie_len = rsn_ie->len + TLV_HDR_LEN;
+ }
+ }
+ brcmf_fil_iovar_data_set(ifp, "wpaie", ie, ie_len);
+ }
+
+ err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG,
+ sme->ie, sme->ie_len);
+ if (err)
+ brcmf_err("Set Assoc REQ IE Failed\n");
+ else
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n");
+
set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
if (chan) {
cfg->channel =
ieee80211_frequency_to_channel(chan->center_freq);
- brcmf_dbg(CONN, "channel (%d), center_req (%d)\n",
- cfg->channel, chan->center_freq);
- } else
+ chanspec = channel_to_chanspec(chan);
+ brcmf_dbg(CONN, "channel=%d, center_req=%d, chanspec=0x%04x\n",
+ cfg->channel, chan->center_freq, chanspec);
+ } else {
cfg->channel = 0;
+ chanspec = 0;
+ }
brcmf_dbg(INFO, "ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
@@ -1434,6 +1635,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
+ sme->auth_type = brcmf_war_auth_type(ifp, sme->auth_type);
err = brcmf_set_auth_type(ndev, sme);
if (err) {
brcmf_err("wl_set_auth_type failed (%d)\n", err);
@@ -1458,27 +1660,88 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
+ profile->ssid.SSID_len = min_t(u32, (u32)sizeof(profile->ssid.SSID),
+ (u32)sme->ssid_len);
+ memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
+ if (profile->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ profile->ssid.SSID[profile->ssid.SSID_len] = 0;
+ brcmf_dbg(CONN, "SSID \"%s\", len (%d)\n", profile->ssid.SSID,
+ profile->ssid.SSID_len);
+ }
+
+ /* Join with specific BSSID and cached SSID.
+ * If SSID is zero, join based on BSSID only.
+ */
+ join_params_size = offsetof(struct brcmf_ext_join_params_le, assoc_le) +
+ offsetof(struct brcmf_assoc_params_le, chanspec_list);
+ if (cfg->channel)
+ join_params_size += sizeof(u16);
+ ext_join_params = kzalloc(join_params_size, GFP_KERNEL);
+ if (ext_join_params == NULL) {
+ err = -ENOMEM;
+ goto done;
+ }
+ ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
+ memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
+ profile->ssid.SSID_len);
+ /* Increase dwell time to receive probe response or detect beacon
+ * from target AP in a noisy environment (only during connect)
+ */
+ ext_join_params->scan_le.active_time =
+ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+ ext_join_params->scan_le.passive_time =
+ cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+ /* Set up join scan parameters */
+ ext_join_params->scan_le.scan_type = -1;
+ /* To sync with the presence period of a VSDB GO, send probe
+ * requests more frequently. Probing stops once a probe response
+ * is received from the target AP/GO.
+ */
+ ext_join_params->scan_le.nprobes =
+ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+ BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+ ext_join_params->scan_le.home_time = cpu_to_le32(-1);
+
+ if (sme->bssid)
+ memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN);
+ else
+ memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN);
+
+ if (cfg->channel) {
+ ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1);
+
+ ext_join_params->assoc_le.chanspec_list[0] =
+ cpu_to_le16(chanspec);
+ }
+
+ err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
+ join_params_size);
+ kfree(ext_join_params);
+ if (!err)
+ /* This is it. join command worked, we are done */
+ goto done;
+
+ /* join command failed, fallback to set ssid */
memset(&join_params, 0, sizeof(join_params));
join_params_size = sizeof(join_params.ssid_le);
- profile->ssid.SSID_len = min_t(u32,
- sizeof(ssid.SSID), (u32)sme->ssid_len);
memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len);
- memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
- memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
-
- if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN)
- brcmf_dbg(CONN, "ssid \"%s\", len (%d)\n",
- ssid.SSID, ssid.SSID_len);
+ if (sme->bssid)
+ memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN);
+ else
+ memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
- brcmf_ch_to_chanspec(cfg->channel,
- &join_params, &join_params_size);
+ if (cfg->channel) {
+ join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec);
+ join_params.params_le.chanspec_num = cpu_to_le32(1);
+ join_params_size += sizeof(join_params.params_le);
+ }
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, join_params_size);
if (err)
- brcmf_err("WLC_SET_SSID failed (%d)\n", err);
+ brcmf_err("BRCMF_C_SET_SSID failed (%d)\n", err);
done:
if (err)
@@ -1937,7 +2200,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
/* Report the current tx rate */
- err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
if (err) {
brcmf_err("Could not get rate (%d)\n", err);
goto done;
@@ -2060,7 +2323,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
if (!bss)
return -ENOMEM;
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
return err;
}
@@ -2166,7 +2429,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
goto CleanUp;
}
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
CleanUp:
@@ -2182,78 +2445,10 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
return vif->mode == WL_MODE_IBSS;
}
-/*
- * Traverse a string of 1-byte tag/1-byte length/variable-length value
- * triples, returning a pointer to the substring whose first element
- * matches tag
- */
-static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
-{
- struct brcmf_tlv *elt;
- int totlen;
-
- elt = (struct brcmf_tlv *) buf;
- totlen = buflen;
-
- /* find tagged parameter */
- while (totlen >= TLV_HDR_LEN) {
- int len = elt->len;
-
- /* validate remaining totlen */
- if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
- return elt;
-
- elt = (struct brcmf_tlv *) ((u8 *) elt + (len + TLV_HDR_LEN));
- totlen -= (len + TLV_HDR_LEN);
- }
-
- return NULL;
-}
-
-/* Is any of the tlvs the expected entry? If
- * not update the tlvs buffer pointer/length.
- */
-static bool
-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
- u8 *oui, u32 oui_len, u8 type)
-{
- /* If the contents match the OUI and the type */
- if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
- !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
- type == ie[TLV_BODY_OFF + oui_len]) {
- return true;
- }
-
- if (tlvs == NULL)
- return false;
- /* point to the next ie */
- ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
- /* calculate the length of the rest of the buffer */
- *tlvs_len -= (int)(ie - *tlvs);
- /* update the pointer to the start of the buffer */
- *tlvs = ie;
-
- return false;
-}
-
-static struct brcmf_vs_tlv *
-brcmf_find_wpaie(u8 *parse, u32 len)
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_if *ifp)
{
- struct brcmf_tlv *ie;
-
- while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
- if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
- WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
- return (struct brcmf_vs_tlv *)ie;
- }
- return NULL;
-}
-
-static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
-{
- struct net_device *ndev = cfg_to_ndev(cfg);
- struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
- struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
struct brcmf_tlv *tim;
@@ -2309,7 +2504,7 @@ update_bss_info_out:
return err;
}
-static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
+void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
{
struct escan_info *escan = &cfg->escan_info;
@@ -2328,8 +2523,7 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
container_of(work, struct brcmf_cfg80211_info,
escan_timeout_work);
- brcmf_notify_escan_complete(cfg,
- cfg->escan_info.ndev, true, true);
+ brcmf_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
}
static void brcmf_escan_timeout(unsigned long data)
@@ -2406,11 +2600,6 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
brcmf_err("Invalid escan result (NULL pointer)\n");
goto exit;
}
- if (!cfg->scan_request) {
- brcmf_dbg(SCAN, "result without cfg80211 request\n");
- goto exit;
- }
-
if (le16_to_cpu(escan_result_le->bss_count) != 1) {
brcmf_err("Invalid bss_count %d: ignoring\n",
escan_result_le->bss_count);
@@ -2418,6 +2607,14 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
}
bss_info_le = &escan_result_le->bss_info_le;
+ if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le))
+ goto exit;
+
+ if (!cfg->scan_request) {
+ brcmf_dbg(SCAN, "result without cfg80211 request\n");
+ goto exit;
+ }
+
bi_length = le32_to_cpu(bss_info_le->length);
if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
WL_ESCAN_RESULTS_FIXED_SIZE)) {
@@ -2456,6 +2653,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
list->count++;
} else {
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (brcmf_p2p_scan_finding_common_channel(cfg, NULL))
+ goto exit;
if (cfg->scan_request) {
cfg->bss_list = (struct brcmf_scan_results *)
cfg->escan_info.escan_buf;
@@ -2464,7 +2663,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
brcmf_notify_escan_complete(cfg, ndev, aborted,
false);
} else
- brcmf_err("Unexpected scan result 0x%x\n", status);
+ brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n",
+ status);
}
exit:
return err;
@@ -2968,9 +3168,8 @@ static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
}
#endif
-static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
+static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
/* set auth */
@@ -3229,7 +3428,7 @@ brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len,
parsed_info->vndrie.oui[2],
parsed_info->vndrie.oui_type);
- if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
+ if (vndr_ies->count >= VNDR_IE_PARSE_LIMIT)
break;
next:
remaining_len -= (ie->len + TLV_HDR_LEN);
@@ -3263,7 +3462,6 @@ brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
return ie_len + VNDR_IE_HDR_SIZE;
}
-static
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len)
{
@@ -3295,24 +3493,28 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
if (!iovar_ie_buf)
return -ENOMEM;
curr_ie_buf = iovar_ie_buf;
- if (ifp->vif->mode == WL_MODE_AP) {
- switch (pktflag) {
- case VNDR_IE_PRBRSP_FLAG:
- mgmt_ie_buf = saved_ie->probe_res_ie;
- mgmt_ie_len = &saved_ie->probe_res_ie_len;
- mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
- break;
- case VNDR_IE_BEACON_FLAG:
- mgmt_ie_buf = saved_ie->beacon_ie;
- mgmt_ie_len = &saved_ie->beacon_ie_len;
- mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
- break;
- default:
- err = -EPERM;
- brcmf_err("not suitable type\n");
- goto exit;
- }
- } else {
+ switch (pktflag) {
+ case BRCMF_VNDR_IE_PRBREQ_FLAG:
+ mgmt_ie_buf = saved_ie->probe_req_ie;
+ mgmt_ie_len = &saved_ie->probe_req_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->probe_req_ie);
+ break;
+ case BRCMF_VNDR_IE_PRBRSP_FLAG:
+ mgmt_ie_buf = saved_ie->probe_res_ie;
+ mgmt_ie_len = &saved_ie->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
+ break;
+ case BRCMF_VNDR_IE_BEACON_FLAG:
+ mgmt_ie_buf = saved_ie->beacon_ie;
+ mgmt_ie_len = &saved_ie->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
+ break;
+ case BRCMF_VNDR_IE_ASSOCREQ_FLAG:
+ mgmt_ie_buf = saved_ie->assoc_req_ie;
+ mgmt_ie_len = &saved_ie->assoc_req_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie);
+ break;
+ default:
err = -EPERM;
brcmf_err("not suitable type\n");
goto exit;
@@ -3421,6 +3623,49 @@ exit:
return err;
}
+s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
+{
+ s32 pktflags[] = {
+ BRCMF_VNDR_IE_PRBREQ_FLAG,
+ BRCMF_VNDR_IE_PRBRSP_FLAG,
+ BRCMF_VNDR_IE_BEACON_FLAG
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pktflags); i++)
+ brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0);
+
+ memset(&vif->saved_ie, 0, sizeof(vif->saved_ie));
+ return 0;
+}
+
+static s32
+brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
+ struct cfg80211_beacon_data *beacon)
+{
+ s32 err;
+
+ /* Set Beacon IEs to FW */
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_BEACON_FLAG,
+ beacon->tail, beacon->tail_len);
+ if (err) {
+ brcmf_err("Set Beacon IE Failed\n");
+ return err;
+ }
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
+
+ /* Set Probe Response IEs to FW */
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBRSP_FLAG,
+ beacon->proberesp_ies,
+ beacon->proberesp_ies_len);
+ if (err)
+ brcmf_err("Set Probe Resp IE Failed\n");
+ else
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+
+ return err;
+}
+
static s32
brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ap_settings *settings)
@@ -3433,7 +3678,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_tlv *rsn_ie;
struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
- s32 bssidx = 0;
+ enum nl80211_iftype dev_role;
+ struct brcmf_fil_bss_enable_le bss_enable;
brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
cfg80211_get_chandef_type(&settings->chandef),
@@ -3443,10 +3689,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
settings->ssid, settings->ssid_len, settings->auth_type,
settings->inactivity_timeout);
- if (!test_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state)) {
- brcmf_err("Not in AP creation mode\n");
- return -EPERM;
- }
+ dev_role = ifp->vif->wdev.iftype;
memset(&ssid_le, 0, sizeof(ssid_le));
if (settings->ssid == NULL || settings->ssid_len == 0) {
@@ -3467,21 +3710,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_set_mpc(ndev, 0);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
- if (err < 0) {
- brcmf_err("BRCMF_C_DOWN error %d\n", err);
- goto exit;
- }
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
- if (err < 0) {
- brcmf_err("SET INFRA error %d\n", err);
- goto exit;
- }
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
- if (err < 0) {
- brcmf_err("setting AP mode failed %d\n", err);
- goto exit;
- }
/* find the RSN_IE */
rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
@@ -3507,27 +3735,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
} else {
brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
- brcmf_configure_opensecurity(ndev, bssidx);
+ brcmf_configure_opensecurity(ifp);
}
- /* Set Beacon IEs to FW */
- err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
- VNDR_IE_BEACON_FLAG,
- settings->beacon.tail,
- settings->beacon.tail_len);
- if (err)
- brcmf_err("Set Beacon IE Failed\n");
- else
- brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
- /* Set Probe Response IEs to FW */
- err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
- VNDR_IE_PRBRSP_FLAG,
- settings->beacon.proberesp_ies,
- settings->beacon.proberesp_ies_len);
- if (err)
- brcmf_err("Set Probe Resp IE Failed\n");
- else
- brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+ brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
if (settings->beacon_interval) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
@@ -3545,22 +3756,62 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
goto exit;
}
}
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
- if (err < 0) {
- brcmf_err("BRCMF_C_UP error (%d)\n", err);
- goto exit;
+
+ if (dev_role == NL80211_IFTYPE_AP) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+ if (err < 0) {
+ brcmf_err("BRCMF_C_DOWN error %d\n", err);
+ goto exit;
+ }
+ brcmf_fil_iovar_int_set(ifp, "apsta", 0);
}
- memset(&join_params, 0, sizeof(join_params));
- /* join parameters starts with ssid */
- memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
- /* create softap */
- err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
- &join_params, sizeof(join_params));
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
if (err < 0) {
- brcmf_err("SET SSID error (%d)\n", err);
+ brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
+ if (dev_role == NL80211_IFTYPE_AP) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
+ if (err < 0) {
+ brcmf_err("setting AP mode failed %d\n", err);
+ goto exit;
+ }
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
+ if (err < 0) {
+ brcmf_err("BRCMF_C_UP error (%d)\n", err);
+ goto exit;
+ }
+
+ memset(&join_params, 0, sizeof(join_params));
+ /* join parameters starts with ssid */
+ memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
+ /* create softap */
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+ &join_params, sizeof(join_params));
+ if (err < 0) {
+ brcmf_err("SET SSID error (%d)\n", err);
+ goto exit;
+ }
+ brcmf_dbg(TRACE, "AP mode configuration complete\n");
+ } else {
+ err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
+ sizeof(ssid_le));
+ if (err < 0) {
+ brcmf_err("setting ssid failed %d\n", err);
+ goto exit;
+ }
+ bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
+ bss_enable.enable = cpu_to_le32(1);
+ err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+ sizeof(bss_enable));
+ if (err < 0) {
+ brcmf_err("bss_enable config failed %d\n", err);
+ goto exit;
+ }
+
+ brcmf_dbg(TRACE, "GO mode configuration complete\n");
+ }
clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
@@ -3574,10 +3825,11 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = -EPERM;
+ struct brcmf_fil_bss_enable_le bss_enable;
brcmf_dbg(TRACE, "Enter\n");
- if (ifp->vif->mode == WL_MODE_AP) {
+ if (ifp->vif->wdev.iftype == NL80211_IFTYPE_AP) {
/* Due to most likely deauths outstanding we sleep */
/* first to make sure they get processed by fw. */
msleep(400);
@@ -3591,18 +3843,41 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
brcmf_err("BRCMF_C_UP error %d\n", err);
goto exit;
}
- brcmf_set_mpc(ndev, 1);
- clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
- clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+ } else {
+ bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
+ bss_enable.enable = cpu_to_le32(0);
+ err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+ sizeof(bss_enable));
+ if (err < 0)
+ brcmf_err("bss_enable config failed %d\n", err);
}
+ brcmf_set_mpc(ndev, 1);
+ set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+
exit:
return err;
}
+static s32
+brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_beacon_data *info)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ err = brcmf_config_ap_mgmt_ie(ifp->vif, info);
+
+ return err;
+}
+
static int
brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
u8 *mac)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_scb_val_le scbval;
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
@@ -3612,6 +3887,8 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(TRACE, "Enter %pM\n", mac);
+ if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+ ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
if (!check_vif_up(ifp->vif))
return -EIO;
@@ -3626,7 +3903,147 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
return err;
}
+
+static void
+brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u16 frame_type, bool reg)
+{
+ struct brcmf_if *ifp = netdev_priv(wdev->netdev);
+ struct brcmf_cfg80211_vif *vif = ifp->vif;
+ u16 mgmt_type;
+
+ brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg);
+
+ mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
+ if (reg)
+ vif->mgmt_rx_reg |= BIT(mgmt_type);
+ else
+ vif->mgmt_rx_reg &= ~BIT(mgmt_type);
+}
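
brcmf_cfg80211_mgmt_frame_register() turns the management subtype, bits 4-7 of frame_control, into a bit index in vif->mgmt_rx_reg. A standalone sketch of that bitmap bookkeeping, with the relevant ieee80211.h constants restated locally (assumed values):

#include <stdio.h>
#include <stdint.h>

#define DEMO_FCTL_STYPE      0x00F0
#define DEMO_STYPE_PROBE_REQ 0x0040
#define DEMO_STYPE_ACTION    0x00D0

int main(void)
{
	uint16_t mgmt_rx_reg = 0;
	uint16_t fc_probe = DEMO_STYPE_PROBE_REQ;	/* incoming frame_control values */
	uint16_t fc_action = DEMO_STYPE_ACTION;

	/* userspace registered interest in probe requests only */
	mgmt_rx_reg |= 1 << ((DEMO_STYPE_PROBE_REQ & DEMO_FCTL_STYPE) >> 4);

	printf("probe_req registered: %d\n",
	       !!(mgmt_rx_reg & (1 << ((fc_probe & DEMO_FCTL_STYPE) >> 4))));
	printf("action registered:    %d\n",
	       !!(mgmt_rx_reg & (1 << ((fc_action & DEMO_FCTL_STYPE) >> 4))));
	return 0;
}
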
+
+
+static int
+brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *chan, bool offchan,
+ unsigned int wait, const u8 *buf, size_t len,
+ bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ const struct ieee80211_mgmt *mgmt;
+ struct brcmf_if *ifp;
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ s32 ie_offset;
+ s32 ie_len;
+ struct brcmf_fil_action_frame_le *action_frame;
+ struct brcmf_fil_af_params_le *af_params;
+ bool ack;
+ s32 chan_nr;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ *cookie = 0;
+
+ mgmt = (const struct ieee80211_mgmt *)buf;
+
+ if (!ieee80211_is_mgmt(mgmt->frame_control)) {
+ brcmf_err("Driver only allows MGMT packet type\n");
+ return -EPERM;
+ }
+
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ /* Right now the only reason to get a probe response */
+ /* is for a P2P listen response or for a P2P GO from */
+ /* wpa_supplicant. Unfortunately the probe response */
+ /* is sent on the primary ndev, while the dongle */
+ /* wants it on the P2P vif. Since this is the only */
+ /* reason for a probe response to be sent, the vif */
+ /* is taken from cfg. If sending a proberesp for a */
+ /* non-P2P response is ever desired, the data should */
+ /* be checked for "DIRECT-". Once the supplicant uses */
+ /* a dedicated P2P wdev for this, the 'hack' below is */
+ /* no longer needed. */
+ ie_offset = DOT11_MGMT_HDR_LEN +
+ DOT11_BCN_PRB_FIXED_LEN;
+ ie_len = len - ie_offset;
+ ifp = netdev_priv(wdev->netdev);
+ vif = ifp->vif;
+ if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
+ vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ err = brcmf_vif_set_mgmt_ie(vif,
+ BRCMF_VNDR_IE_PRBRSP_FLAG,
+ &buf[ie_offset],
+ ie_len);
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
+ GFP_KERNEL);
+ } else if (ieee80211_is_action(mgmt->frame_control)) {
+ af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
+ if (af_params == NULL) {
+ brcmf_err("unable to allocate frame\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ action_frame = &af_params->action_frame;
+ /* Add the packet Id */
+ action_frame->packet_id = cpu_to_le32(*cookie);
+ /* Add BSSID */
+ memcpy(&action_frame->da[0], &mgmt->da[0], ETH_ALEN);
+ memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
+ /* Add the frame length, excluding the 802.11 header */
+ action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
+ /* Add the channel */
+ chan_nr = ieee80211_frequency_to_channel(chan->center_freq);
+ af_params->channel = cpu_to_le32(chan_nr);
+
+ memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
+ le16_to_cpu(action_frame->len));
+
+ brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
+ *cookie, le16_to_cpu(action_frame->len),
+ chan->center_freq);
+
+ ack = brcmf_p2p_send_action_frame(cfg, wdev->netdev,
+ af_params);
+
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
+ GFP_KERNEL);
+ kfree(af_params);
+ } else {
+ brcmf_dbg(TRACE, "Unhandled, fc=%04x!!\n", mgmt->frame_control);
+ brcmf_dbg_hex_dump(true, buf, len, "payload, len=%Zu\n", len);
+ }
+
+exit:
+ return err;
+}
+
+
+static int
+brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_vif *vif;
+ int err = 0;
+
+ brcmf_dbg(TRACE, "Enter p2p listen cancel\n");
+
+ vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (vif == NULL) {
+ brcmf_err("No p2p device available for probe response\n");
+ err = -ENODEV;
+ goto exit;
+ }
+ brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+exit:
+ return err;
+}
+
static struct cfg80211_ops wl_cfg80211_ops = {
+ .add_virtual_intf = brcmf_cfg80211_add_iface,
+ .del_virtual_intf = brcmf_cfg80211_del_iface,
.change_virtual_intf = brcmf_cfg80211_change_iface,
.scan = brcmf_cfg80211_scan,
.set_wiphy_params = brcmf_cfg80211_set_wiphy_params,
@@ -3650,28 +4067,43 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.flush_pmksa = brcmf_cfg80211_flush_pmksa,
.start_ap = brcmf_cfg80211_start_ap,
.stop_ap = brcmf_cfg80211_stop_ap,
+ .change_beacon = brcmf_cfg80211_change_beacon,
.del_station = brcmf_cfg80211_del_station,
.sched_scan_start = brcmf_cfg80211_sched_scan_start,
.sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
+ .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register,
+ .mgmt_tx = brcmf_cfg80211_mgmt_tx,
+ .remain_on_channel = brcmf_p2p_remain_on_channel,
+ .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
#ifdef CONFIG_NL80211_TESTMODE
.testmode_cmd = brcmf_cfg80211_testmode
#endif
};
-static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
+static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
{
- s32 err = 0;
-
- switch (mode) {
- case WL_MODE_BSS:
- return NL80211_IFTYPE_STATION;
- case WL_MODE_IBSS:
- return NL80211_IFTYPE_ADHOC;
+ switch (type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ return -ENOTSUPP;
+ case NL80211_IFTYPE_ADHOC:
+ return WL_MODE_IBSS;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return WL_MODE_BSS;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ return WL_MODE_AP;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return WL_MODE_P2P;
+ case NL80211_IFTYPE_UNSPECIFIED:
default:
- return NL80211_IFTYPE_UNSPECIFIED;
+ break;
}
- return err;
+ return -EINVAL;
}
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
@@ -3683,6 +4115,56 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
}
+static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO)
+ },
+};
+static const struct ieee80211_iface_combination brcmf_iface_combos[] = {
+ {
+ .max_interfaces = BRCMF_IFACE_MAX_CNT,
+ .num_different_channels = 1, /* no multi-channel for now */
+ .n_limits = ARRAY_SIZE(brcmf_iface_limits),
+ .limits = brcmf_iface_limits
+ }
+};
+
+static const struct ieee80211_txrx_stypes
+brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ }
+};
+
static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
{
struct wiphy *wiphy;
@@ -3695,10 +4177,16 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
}
set_wiphy_dev(wiphy, phydev);
wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+ wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_AP);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+ wiphy->iface_combinations = brcmf_iface_combos;
+ wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
* it as 11a by default.
@@ -3710,10 +4198,11 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->cipher_suites = __wl_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
- wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power
- * save mode
- * by default
- */
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
+ WIPHY_FLAG_OFFCHAN_TX |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->mgmt_stypes = brcmf_txrx_stypes;
+ wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
err = wiphy_register(wiphy);
if (err < 0) {
@@ -3724,31 +4213,25 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
return wiphy;
}
-static
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
- struct net_device *netdev,
- s32 mode, bool pm_block)
+ enum nl80211_iftype type,
+ bool pm_block)
{
struct brcmf_cfg80211_vif *vif;
if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
return ERR_PTR(-ENOSPC);
+ brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
+ sizeof(*vif));
vif = kzalloc(sizeof(*vif), GFP_KERNEL);
if (!vif)
return ERR_PTR(-ENOMEM);
vif->wdev.wiphy = cfg->wiphy;
- vif->wdev.netdev = netdev;
- vif->wdev.iftype = brcmf_mode_to_nl80211_iftype(mode);
-
- if (netdev) {
- vif->ifp = netdev_priv(netdev);
- netdev->ieee80211_ptr = &vif->wdev;
- SET_NETDEV_DEV(netdev, wiphy_dev(cfg->wiphy));
- }
+ vif->wdev.iftype = type;
- vif->mode = mode;
+ vif->mode = brcmf_nl80211_iftype_to_mode(type);
vif->pm_block = pm_block;
vif->roam_off = -1;
@@ -3759,7 +4242,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
return vif;
}
-static void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
{
struct brcmf_cfg80211_info *cfg;
struct wiphy *wiphy;
@@ -3833,9 +4316,9 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
conn_info->resp_ie_len = 0;
}
-static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_if *ifp)
{
- struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
u32 req_len;
@@ -3911,9 +4394,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
brcmf_dbg(TRACE, "Enter\n");
- brcmf_get_assoc_ies(cfg);
+ brcmf_get_assoc_ies(cfg, ifp);
memcpy(profile->bssid, e->addr, ETH_ALEN);
- brcmf_update_bss_info(cfg);
+ brcmf_update_bss_info(cfg, ifp);
buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
if (buf == NULL) {
@@ -3968,9 +4451,11 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTING,
&ifp->vif->sme_state)) {
if (completed) {
- brcmf_get_assoc_ies(cfg);
+ brcmf_get_assoc_ies(cfg, ifp);
memcpy(profile->bssid, e->addr, ETH_ALEN);
- brcmf_update_bss_info(cfg);
+ brcmf_update_bss_info(cfg, ifp);
+ set_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state);
}
cfg80211_connect_result(ndev,
(u8 *)profile->bssid,
@@ -3981,9 +4466,6 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
completed ? WLAN_STATUS_SUCCESS :
WLAN_STATUS_AUTH_TIMEOUT,
GFP_KERNEL);
- if (completed)
- set_bit(BRCMF_VIF_STATUS_CONNECTED,
- &ifp->vif->sme_state);
brcmf_dbg(CONN, "Report connect result - connection %s\n",
completed ? "succeeded" : "failed");
}
@@ -3996,38 +4478,38 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
- s32 err = 0;
+ static int generation;
u32 event = e->event_code;
u32 reason = e->reason;
- u32 len = e->datalen;
- static int generation;
-
struct station_info sinfo;
brcmf_dbg(CONN, "event %d, reason %d\n", event, reason);
- memset(&sinfo, 0, sizeof(sinfo));
+ if (event == BRCMF_E_LINK && reason == BRCMF_E_REASON_LINK_BSSCFG_DIS &&
+ ndev != cfg_to_ndev(cfg)) {
+ brcmf_dbg(CONN, "AP mode link down\n");
+ complete(&cfg->vif_disabled);
+ return 0;
+ }
- sinfo.filled = 0;
if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
- reason == BRCMF_E_STATUS_SUCCESS) {
+ (reason == BRCMF_E_STATUS_SUCCESS)) {
+ memset(&sinfo, 0, sizeof(sinfo));
sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
if (!data) {
brcmf_err("No IEs present in ASSOC/REASSOC_IND");
return -EINVAL;
}
sinfo.assoc_req_ies = data;
- sinfo.assoc_req_ies_len = len;
+ sinfo.assoc_req_ies_len = e->datalen;
generation++;
sinfo.generation = generation;
- cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_ATOMIC);
+ cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_KERNEL);
} else if ((event == BRCMF_E_DISASSOC_IND) ||
(event == BRCMF_E_DEAUTH_IND) ||
(event == BRCMF_E_DEAUTH)) {
- generation++;
- sinfo.generation = generation;
- cfg80211_del_sta(ndev, e->addr, GFP_ATOMIC);
+ cfg80211_del_sta(ndev, e->addr, GFP_KERNEL);
}
- return err;
+ return 0;
}
static s32
@@ -4064,6 +4546,8 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
}
brcmf_link_down(ifp->vif);
brcmf_init_prof(ndev_to_prof(ndev));
+ if (ndev != cfg_to_ndev(cfg))
+ complete(&cfg->vif_disabled);
} else if (brcmf_is_nonetwork(cfg, e)) {
if (brcmf_is_ibssmode(ifp->vif))
clear_bit(BRCMF_VIF_STATUS_CONNECTING,
@@ -4112,6 +4596,57 @@ brcmf_notify_mic_status(struct brcmf_if *ifp,
return 0;
}
+static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_if_event *ifevent = (struct brcmf_if_event *)data;
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+ struct brcmf_cfg80211_vif *vif;
+
+ brcmf_dbg(TRACE, "Enter: action %u flags %u ifidx %u bsscfg %u\n",
+ ifevent->action, ifevent->flags, ifevent->ifidx,
+ ifevent->bssidx);
+
+ mutex_lock(&event->vif_event_lock);
+ event->action = ifevent->action;
+ vif = event->vif;
+
+ switch (ifevent->action) {
+ case BRCMF_E_IF_ADD:
+ /* waiting process may have timed out */
+		if (!cfg->vif_event.vif) {
+			mutex_unlock(&event->vif_event_lock);
+			return -EBADF;
+		}
+
+ ifp->vif = vif;
+ vif->ifp = ifp;
+ vif->wdev.netdev = ifp->ndev;
+ ifp->ndev->ieee80211_ptr = &vif->wdev;
+ SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
+ mutex_unlock(&event->vif_event_lock);
+ wake_up(&event->vif_wq);
+ return 0;
+
+ case BRCMF_E_IF_DEL:
+ ifp->vif = NULL;
+ mutex_unlock(&event->vif_event_lock);
+		/* event may not be the result of a user request */
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ wake_up(&event->vif_wq);
+ return 0;
+
+ case BRCMF_E_IF_CHANGE:
+ mutex_unlock(&event->vif_event_lock);
+ wake_up(&event->vif_wq);
+ return 0;
+
+ default:
+ mutex_unlock(&event->vif_event_lock);
+ break;
+ }
+ return -EINVAL;
+}
+
static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
{
conf->frag_threshold = (u32)-1;
@@ -4143,6 +4678,18 @@ static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
brcmf_notify_connect_status);
brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND,
brcmf_notify_sched_scan_results);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_IF,
+ brcmf_notify_vif_event);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_PROBEREQ_MSG,
+ brcmf_p2p_notify_rx_mgmt_p2p_probereq);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_DISC_LISTEN_COMPLETE,
+ brcmf_p2p_notify_listen_complete);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_RX,
+ brcmf_p2p_notify_action_frame_rx);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_COMPLETE,
+ brcmf_p2p_notify_action_tx_complete);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE,
+ brcmf_p2p_notify_action_tx_complete);
}
static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
@@ -4198,7 +4745,7 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
mutex_init(&cfg->usr_sync);
brcmf_init_escan(cfg);
brcmf_init_conf(cfg->conf);
-
+ init_completion(&cfg->vif_disabled);
return err;
}
@@ -4209,6 +4756,12 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
brcmf_deinit_priv_mem(cfg);
}
+static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
+{
+ init_waitqueue_head(&event->vif_wq);
+ mutex_init(&event->vif_event_lock);
+}
+
struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
struct device *busdev)
{
@@ -4232,25 +4785,41 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
cfg = wiphy_priv(wiphy);
cfg->wiphy = wiphy;
cfg->pub = drvr;
+ init_vif_event(&cfg->vif_event);
INIT_LIST_HEAD(&cfg->vif_list);
- vif = brcmf_alloc_vif(cfg, ndev, WL_MODE_BSS, false);
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
wiphy_free(wiphy);
return NULL;
}
+ vif->ifp = ifp;
+ vif->wdev.netdev = ndev;
+ ndev->ieee80211_ptr = &vif->wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
+
err = wl_init_priv(cfg);
if (err) {
brcmf_err("Failed to init iwm_priv (%d)\n", err);
goto cfg80211_attach_out;
}
-
ifp->vif = vif;
+
+ err = brcmf_p2p_attach(cfg);
+ if (err) {
+		brcmf_err("P2P initialisation failed (%d)\n", err);
+ goto cfg80211_p2p_attach_out;
+ }
+
return cfg;
+cfg80211_p2p_attach_out:
+ wl_deinit_priv(cfg);
+
cfg80211_attach_out:
brcmf_free_vif(vif);
+ wiphy_free(wiphy);
return NULL;
}
@@ -4489,3 +5058,57 @@ s32 brcmf_cfg80211_down(struct net_device *ndev)
return err;
}
+u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state)
+{
+ struct brcmf_cfg80211_vif *vif;
+	u32 result = 0;
+
+ list_for_each_entry(vif, &cfg->vif_list, list) {
+ if (test_bit(state, &vif->sme_state))
+ result++;
+ }
+ return result;
+}
+
+static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
+ u8 action)
+{
+ u8 evt_action;
+
+ mutex_lock(&event->vif_event_lock);
+ evt_action = event->action;
+ mutex_unlock(&event->vif_event_lock);
+ return evt_action == action;
+}
+
+void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+
+ mutex_lock(&event->vif_event_lock);
+ event->vif = vif;
+ event->action = 0;
+ mutex_unlock(&event->vif_event_lock);
+}
+
+bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+ bool armed;
+
+ mutex_lock(&event->vif_event_lock);
+ armed = event->vif != NULL;
+ mutex_unlock(&event->vif_event_lock);
+
+ return armed;
+}
+int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
+ u8 action, ulong timeout)
+{
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+
+ return wait_event_timeout(event->vif_wq,
+ vif_event_equals(event, action), timeout);
+}
+
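Editorial note: the arm/wait helpers added above are meant to bracket a firmware
interface request. The caller arms the event before issuing the request so that
brcmf_notify_vif_event() cannot race past the waiter, then sleeps until the
matching BRCMF_E_IF_* action arrives or the timeout expires. A minimal caller
sketch (illustration only; brcmf_fw_request_iface() is a hypothetical stand-in
for the real firmware iovar and the 1500 ms timeout is an assumed value):

static int example_add_iface_and_wait(struct brcmf_cfg80211_info *cfg,
				      struct brcmf_cfg80211_vif *vif)
{
	int err;

	/* arm first so brcmf_notify_vif_event() cannot beat the waiter */
	brcmf_cfg80211_arm_vif_event(cfg, vif);

	err = brcmf_fw_request_iface(cfg, vif);	/* hypothetical fw request */
	if (err) {
		brcmf_cfg80211_arm_vif_event(cfg, NULL);	/* disarm */
		return err;
	}

	/* brcmf_notify_vif_event() wakes this up on BRCMF_E_IF_ADD;
	 * a zero return means the firmware event never arrived in time */
	if (!brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
						   msecs_to_jiffies(1500))) {
		brcmf_cfg80211_arm_vif_event(cfg, NULL);
		return -EIO;
	}

	brcmf_cfg80211_arm_vif_event(cfg, NULL);	/* disarm */
	return 0;
}
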
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index e4d9cc7a8e6..8b5d4989906 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -41,6 +41,38 @@
#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
#define IE_MAX_LEN 512
+/* IE TLV processing */
+#define TLV_LEN_OFF 1 /* length offset */
+#define TLV_HDR_LEN 2 /* header length */
+#define TLV_BODY_OFF 2 /* body offset */
+#define TLV_OUI_LEN 3 /* oui id length */
+
+/* 802.11 Mgmt Packet flags */
+#define BRCMF_VNDR_IE_BEACON_FLAG 0x1
+#define BRCMF_VNDR_IE_PRBRSP_FLAG 0x2
+#define BRCMF_VNDR_IE_ASSOCRSP_FLAG 0x4
+#define BRCMF_VNDR_IE_AUTHRSP_FLAG 0x8
+#define BRCMF_VNDR_IE_PRBREQ_FLAG 0x10
+#define BRCMF_VNDR_IE_ASSOCREQ_FLAG 0x20
+/* vendor IE in IW advertisement protocol ID field */
+#define BRCMF_VNDR_IE_IWAPID_FLAG 0x40
+/* allow custom IE id */
+#define BRCMF_VNDR_IE_CUSTOM_FLAG 0x100
+
+/* P2P Action Frames flags (spec ordered) */
+#define BRCMF_VNDR_IE_GONREQ_FLAG 0x001000
+#define BRCMF_VNDR_IE_GONRSP_FLAG 0x002000
+#define BRCMF_VNDR_IE_GONCFM_FLAG 0x004000
+#define BRCMF_VNDR_IE_INVREQ_FLAG 0x008000
+#define BRCMF_VNDR_IE_INVRSP_FLAG 0x010000
+#define BRCMF_VNDR_IE_DISREQ_FLAG 0x020000
+#define BRCMF_VNDR_IE_DISRSP_FLAG 0x040000
+#define BRCMF_VNDR_IE_PRDREQ_FLAG 0x080000
+#define BRCMF_VNDR_IE_PRDRSP_FLAG 0x100000
+
+#define BRCMF_VNDR_IE_P2PAF_SHIFT 12
+
+
/**
* enum brcmf_scan_status - dongle scan status
*
@@ -52,11 +84,19 @@ enum brcmf_scan_status {
BRCMF_SCAN_STATUS_ABORT,
};
-/* wi-fi mode */
+/**
+ * enum wl_mode - driver mode of virtual interface.
+ *
+ * @WL_MODE_BSS: connects to BSS.
+ * @WL_MODE_IBSS: operate as ad-hoc.
+ * @WL_MODE_AP: operate as access-point.
+ * @WL_MODE_P2P: provide P2P discovery.
+ */
enum wl_mode {
WL_MODE_BSS,
WL_MODE_IBSS,
- WL_MODE_AP
+ WL_MODE_AP,
+ WL_MODE_P2P
};
/* dongle configuration */
@@ -108,6 +148,7 @@ struct brcmf_cfg80211_profile {
* @BRCMF_VIF_STATUS_READY: ready for operation.
* @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
 * @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.
+ * @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress.
* @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
* @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
*/
@@ -115,6 +156,7 @@ enum brcmf_vif_status {
BRCMF_VIF_STATUS_READY,
BRCMF_VIF_STATUS_CONNECTING,
BRCMF_VIF_STATUS_CONNECTED,
+ BRCMF_VIF_STATUS_DISCONNECTING,
BRCMF_VIF_STATUS_AP_CREATING,
BRCMF_VIF_STATUS_AP_CREATED
};
@@ -122,16 +164,22 @@ enum brcmf_vif_status {
/**
* struct vif_saved_ie - holds saved IEs for a virtual interface.
*
+ * @probe_req_ie: IE info for probe request.
* @probe_res_ie: IE info for probe response.
* @beacon_ie: IE info for beacon frame.
+ * @probe_req_ie_len: IE info length for probe request.
* @probe_res_ie_len: IE info length for probe response.
* @beacon_ie_len: IE info length for beacon frame.
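+ * @assoc_req_ie: IE info for association request.
+ * @assoc_req_ie_len: IE info length for association request.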
*/
struct vif_saved_ie {
+ u8 probe_req_ie[IE_MAX_LEN];
u8 probe_res_ie[IE_MAX_LEN];
u8 beacon_ie[IE_MAX_LEN];
+ u8 assoc_req_ie[IE_MAX_LEN];
+ u32 probe_req_ie_len;
u32 probe_res_ie_len;
u32 beacon_ie_len;
+ u32 assoc_req_ie_len;
};
/**
@@ -145,6 +193,7 @@ struct vif_saved_ie {
* @sme_state: SME state using enum brcmf_vif_status bits.
* @pm_block: power-management blocked.
* @list: linked list.
+ * @mgmt_rx_reg: registered rx mgmt frame types.
*/
struct brcmf_cfg80211_vif {
struct brcmf_if *ifp;
@@ -156,6 +205,7 @@ struct brcmf_cfg80211_vif {
bool pm_block;
struct vif_saved_ie saved_ie;
struct list_head list;
+ u16 mgmt_rx_reg;
};
/* association inform */
@@ -189,6 +239,9 @@ struct escan_info {
u8 escan_buf[WL_ESCAN_BUF_SIZE];
struct wiphy *wiphy;
struct net_device *ndev;
+ s32 (*run)(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct cfg80211_scan_request *request, u16 action);
};
/**
@@ -273,10 +326,27 @@ struct brcmf_pno_scanresults_le {
};
/**
+ * struct brcmf_cfg80211_vif_event - virtual interface event information.
+ *
+ * @vif_wq: waitqueue awaiting interface event from firmware.
+ * @vif_event_lock: protects other members in this structure.
+ * @action: either add, change, or delete.
+ * @vif: virtual interface object related to the event.
+ */
+struct brcmf_cfg80211_vif_event {
+ wait_queue_head_t vif_wq;
+ struct mutex vif_event_lock;
+ u8 action;
+ struct brcmf_cfg80211_vif *vif;
+};
+
+/**
* struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
*
* @wiphy: wiphy object for cfg80211 interface.
* @conf: dongle configuration.
+ * @p2p: peer-to-peer specific information.
* @scan_request: cfg80211 scan request object.
* @usr_sync: mainly for dongle up/down synchronization.
* @bss_list: bss_list holding scanned ap information.
@@ -304,10 +374,12 @@ struct brcmf_pno_scanresults_le {
* @escan_ioctl_buf: dongle command buffer for escan commands.
* @vif_list: linked list of vif instances.
* @vif_cnt: number of vif instances.
+ * @vif_event: vif event signalling.
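+ * @vif_disabled: completion signalled when a non-primary vif link goes down.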
*/
struct brcmf_cfg80211_info {
struct wiphy *wiphy;
struct brcmf_cfg80211_conf *conf;
+ struct brcmf_p2p_info p2p;
struct cfg80211_scan_request *scan_request;
struct mutex usr_sync;
struct brcmf_scan_results *bss_list;
@@ -335,6 +407,21 @@ struct brcmf_cfg80211_info {
u8 *escan_ioctl_buf;
struct list_head vif_list;
u8 vif_cnt;
+ struct brcmf_cfg80211_vif_event vif_event;
+ struct completion vif_disabled;
+};
+
+/**
+ * struct brcmf_tlv - tag_ID/length/value_buffer tuple.
+ *
+ * @id: tag identifier.
+ * @len: number of bytes in value buffer.
+ * @data: value buffer.
+ */
+struct brcmf_tlv {
+ u8 id;
+ u8 len;
+ u8 data[1];
};
static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
@@ -389,4 +476,26 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
s32 brcmf_cfg80211_up(struct net_device *ndev);
s32 brcmf_cfg80211_down(struct net_device *ndev);
+struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
+ enum nl80211_iftype type,
+ bool pm_block);
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
+
+s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
+ const u8 *vndr_ie_buf, u32 vndr_ie_len);
+s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
+struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
+u16 channel_to_chanspec(struct ieee80211_channel *ch);
+u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
+void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_cfg80211_vif *vif);
+bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg);
+int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
+ u8 action, ulong timeout);
+s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ bool aborted, bool fw_abort);
+void brcmf_set_mpc(struct net_device *ndev, int mpc);
+void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
+
#endif /* _wl_cfg80211_h_ */
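Editorial note: struct brcmf_tlv added above describes the id/length/value
triples that the newly declared brcmf_parse_tlvs() is expected to walk, using
the TLV_HDR_LEN/TLV_LEN_OFF layout defined in this header. As an illustration
of that layout only, and not the driver's actual implementation, such a walker
could look like this:

static struct brcmf_tlv *example_find_tlv(void *buf, int buflen, uint key)
{
	struct brcmf_tlv *elt = buf;
	int totlen = buflen;

	/* walk id/len/value triples until the buffer is exhausted */
	while (totlen >= TLV_HDR_LEN) {
		int len = elt->len;

		if (elt->id == key && totlen >= len + TLV_HDR_LEN)
			return elt;

		elt = (struct brcmf_tlv *)((u8 *)elt + len + TLV_HDR_LEN);
		totlen -= len + TLV_HDR_LEN;
	}
	return NULL;
}
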
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index cdb62b8ccc7..10ee314c422 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -183,8 +183,7 @@ static bool brcms_c_country_valid(const char *ccode)
* chars.
*/
if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A &&
- (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A &&
- ccode[2] == '\0'))
+ (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A))
return false;
/*
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 7fc49ca3f59..c6451c61407 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -36,6 +36,7 @@
#include "debug.h"
#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
+#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
/* Flags we support */
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@@ -542,9 +543,8 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ARP_FILTER) {
/* Hardware ARP filter address list or state changed */
- brcms_err(core, "%s: arp filtering: enabled %s, count %d"
- " (implement)\n", __func__, info->arp_filter_enabled ?
- "true" : "false", info->arp_addr_cnt);
+ brcms_err(core, "%s: arp filtering: %d addresses"
+ " (implement)\n", __func__, info->arp_addr_cnt);
}
if (changed & BSS_CHANGED_QOS) {
@@ -713,16 +713,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
}
+static bool brcms_tx_flush_completed(struct brcms_info *wl)
+{
+ bool result;
+
+ spin_lock_bh(&wl->lock);
+ result = brcms_c_tx_flush_completed(wl->wlc);
+ spin_unlock_bh(&wl->lock);
+ return result;
+}
+
static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
{
struct brcms_info *wl = hw->priv;
+ int ret;
no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
- /* wait for packet queue and dma fifos to run empty */
- spin_lock_bh(&wl->lock);
- brcms_c_wait_for_tx_completion(wl->wlc, drop);
- spin_unlock_bh(&wl->lock);
+ ret = wait_event_timeout(wl->tx_flush_wq,
+ brcms_tx_flush_completed(wl),
+ msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
+
+ brcms_dbg_mac80211(wl->wlc->hw->d11core,
+ "ret=%d\n", jiffies_to_msecs(ret));
}
static const struct ieee80211_ops brcms_ops = {
@@ -777,6 +790,7 @@ void brcms_dpc(unsigned long data)
done:
spin_unlock_bh(&wl->lock);
+ wake_up(&wl->tx_flush_wq);
}
/*
@@ -1025,6 +1039,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
atomic_set(&wl->callbacks, 0);
+ init_waitqueue_head(&wl->tx_flush_wq);
+
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
@@ -1614,13 +1630,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
spin_lock_bh(&wl->lock);
return blocked;
}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_msleep(struct brcms_info *wl, uint ms)
-{
- spin_unlock_bh(&wl->lock);
- msleep(ms);
- spin_lock_bh(&wl->lock);
-}
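Editorial note: the flush rework above replaces the brcms_msleep() polling loop
with a waitqueue: brcms_dpc() calls wake_up() after each bottom-half pass and
brcms_ops_flush() sleeps on a lock-protected predicate for at most
BRCMS_FLUSH_TIMEOUT. Reduced to its essential shape (sketch with hypothetical
names, not driver code):

struct example_flusher {
	spinlock_t lock;
	wait_queue_head_t wq;
	unsigned int pending;	/* frames still in flight */
};

static bool example_flush_done(struct example_flusher *f)
{
	bool done;

	/* the predicate takes the lock itself; wait_event_timeout()
	 * re-evaluates it on every wake-up */
	spin_lock_bh(&f->lock);
	done = (f->pending == 0);
	spin_unlock_bh(&f->lock);
	return done;
}

static void example_flush(struct example_flusher *f)
{
	/* bounded wait, mirroring the 500 ms BRCMS_FLUSH_TIMEOUT above */
	wait_event_timeout(f->wq, example_flush_done(f),
			   msecs_to_jiffies(500));
}

/* called from the tx-completion bottom half once work has drained */
static void example_tx_done(struct example_flusher *f)
{
	spin_lock_bh(&f->lock);
	if (f->pending)
		f->pending--;
	spin_unlock_bh(&f->lock);
	wake_up(&f->wq);	/* as brcms_dpc() does after its pass */
}
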
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 9358bd5ebd3..947ccacf43e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,6 +68,8 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
+ /* tx flush */
+ wait_queue_head_t tx_flush_wq;
/* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
@@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
extern void brcms_free_timer(struct brcms_timer *timer);
extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_msleep(struct brcms_info *wl, uint ms);
extern void brcms_dpc(unsigned long data);
extern void brcms_timer(struct brcms_timer *t);
extern void brcms_fatal_error(struct brcms_info *wl);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index c26992a60e6..8ef02dca8f8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -101,8 +101,6 @@
#define DOT11_RTS_LEN 16
#define DOT11_CTS_LEN 10
#define DOT11_BA_BITMAP_LEN 128
-#define DOT11_MIN_BEACON_PERIOD 1
-#define DOT11_MAX_BEACON_PERIOD 0xFFFF
#define DOT11_MAXNUMFRAGS 16
#define DOT11_MAX_FRAG_LEN 2346
@@ -1027,7 +1025,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
static bool
brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
- bool morepending = false;
struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
@@ -1041,23 +1038,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs = &txstatus;
core = wlc_hw->d11core;
*fatal = false;
- s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
- while (!(*fatal)
- && (s1 & TXS_V)) {
- /* !give others some time to run! */
- if (n >= max_tx_num) {
- morepending = true;
- break;
- }
+ while (n < max_tx_num) {
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
if (s1 == 0xffffffff) {
brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
*fatal = true;
return false;
}
- s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+ /* only process when valid */
+ if (!(s1 & TXS_V))
+ break;
+ s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1065,15 +1059,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs->lasttxtime = 0;
*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
- s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+		if (*fatal)
+ return false;
n++;
}
- if (*fatal)
- return false;
-
- return morepending;
+ return n >= max_tx_num;
}
static void brcms_c_tbtt(struct brcms_c_info *wlc)
@@ -3147,8 +3138,7 @@ void brcms_c_reset(struct brcms_c_info *wlc)
brcms_c_statsupd(wlc);
/* reset our snapshot of macstat counters */
- memset((char *)wlc->core->macstat_snapshot, 0,
- sizeof(struct macstat));
+ memset(wlc->core->macstat_snapshot, 0, sizeof(struct macstat));
brcms_b_reset(wlc->hw);
}
@@ -4061,7 +4051,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
return;
}
- memset((char *)&acp_shm, 0, sizeof(struct shm_acparams));
+ memset(&acp_shm, 0, sizeof(struct shm_acparams));
/* fill in shm ac params struct */
acp_shm.txop = params->txop;
/* convert from units of 32us to us for ucode */
@@ -4777,7 +4767,7 @@ static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
struct brcms_bss_info *bi = wlc->default_bss;
/* init default and target BSS with some sane initial values */
- memset((char *)(bi), 0, sizeof(struct brcms_bss_info));
+ memset(bi, 0, sizeof(*bi));
bi->beacon_period = BEACON_INTERVAL_DEFAULT;
/* fill the default channel as the first valid channel
@@ -5306,7 +5296,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
/* Clear rateset override */
- memset(&rs, 0, sizeof(struct brcms_c_rateset));
+ memset(&rs, 0, sizeof(rs));
switch (gmode) {
case GMODE_LEGACY_B:
@@ -5529,7 +5519,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
if (rs->count > BRCMS_NUMRATES)
return -ENOBUFS;
- memset(&internal_rs, 0, sizeof(struct brcms_c_rateset));
+ memset(&internal_rs, 0, sizeof(internal_rs));
/* Copy only legacy rateset section */
internal_rs.count = rs->count;
@@ -5555,8 +5545,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period)
{
- if (period < DOT11_MIN_BEACON_PERIOD ||
- period > DOT11_MAX_BEACON_PERIOD)
+ if (period == 0)
return -EINVAL;
wlc->default_bss->beacon_period = period;
@@ -5633,7 +5622,7 @@ int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (!strcmp(wlc->modulecb[i].name, name) &&
(wlc->modulecb[i].hdl == hdl)) {
- memset(&wlc->modulecb[i], 0, sizeof(struct modulecb));
+ memset(&wlc->modulecb[i], 0, sizeof(wlc->modulecb[i]));
return 0;
}
}
@@ -6453,10 +6442,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
&& (!is_mcs_rate(rspec[k]))) {
- brcms_err(wlc->hw->d11core,
- "wl%d: %s: IEEE80211_TX_"
- "RC_MCS != is_mcs_rate(rspec)\n",
- wlc->pub->unit, __func__);
+ brcms_warn(wlc->hw->d11core,
+ "wl%d: %s: IEEE80211_TX_RC_MCS != is_mcs_rate(rspec)\n",
+ wlc->pub->unit, __func__);
}
if (is_mcs_rate(rspec[k])) {
@@ -6689,11 +6677,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
(struct ofdm_phy_hdr *) rts_plcp) :
rts_plcp[0]) << 8;
} else {
- memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
- memset((char *)&txh->rts_frame, 0,
- sizeof(struct ieee80211_rts));
- memset((char *)txh->RTSPLCPFallback, 0,
- sizeof(txh->RTSPLCPFallback));
+ memset(txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
+ memset(&txh->rts_frame, 0, sizeof(struct ieee80211_rts));
+ memset(txh->RTSPLCPFallback, 0, sizeof(txh->RTSPLCPFallback));
txh->RTSDurFallback = 0;
}
@@ -6848,21 +6834,19 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
wlc->fragthresh[queue] =
(u16) newfragthresh;
} else {
- brcms_err(wlc->hw->d11core,
- "wl%d: %s txop invalid "
- "for rate %d\n",
- wlc->pub->unit, fifo_names[queue],
- rspec2rate(rspec[0]));
+ brcms_warn(wlc->hw->d11core,
+ "wl%d: %s txop invalid for rate %d\n",
+ wlc->pub->unit, fifo_names[queue],
+ rspec2rate(rspec[0]));
}
if (dur > wlc->edcf_txop[ac])
- brcms_err(wlc->hw->d11core,
- "wl%d: %s: %s txop "
- "exceeded phylen %d/%d dur %d/%d\n",
- wlc->pub->unit, __func__,
- fifo_names[queue],
- phylen, wlc->fragthresh[queue],
- dur, wlc->edcf_txop[ac]);
+ brcms_warn(wlc->hw->d11core,
+ "wl%d: %s: %s txop exceeded phylen %d/%d dur %d/%d\n",
+ wlc->pub->unit, __func__,
+ fifo_names[queue],
+ phylen, wlc->fragthresh[queue],
+ dur, wlc->edcf_txop[ac]);
}
}
@@ -7337,7 +7321,7 @@ brcms_c_bcn_prb_template(struct brcms_c_info *wlc, u16 type,
*len = hdr_len + body_len;
/* format PHY and MAC headers */
- memset((char *)buf, 0, hdr_len);
+ memset(buf, 0, hdr_len);
plcp = (struct cck_phy_hdr *) buf;
@@ -7408,9 +7392,13 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
struct brcms_bss_cfg *cfg,
bool suspend)
{
- u16 prb_resp[BCN_TMPL_LEN / 2];
+ u16 *prb_resp;
int len = BCN_TMPL_LEN;
+ prb_resp = kmalloc(BCN_TMPL_LEN, GFP_ATOMIC);
+ if (!prb_resp)
+ return;
+
/*
* write the probe response to hardware, or save in
* the config structure
@@ -7444,6 +7432,8 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
if (suspend)
brcms_c_enable_mac(wlc);
+
+ kfree(prb_resp);
}
void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
@@ -7517,25 +7507,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
return wlc->band->bandunit;
}
-void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
{
- int timeout = 20;
int i;
/* Kick DMA to send any pending AMPDU */
for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
if (wlc->hw->di[i])
- dma_txflush(wlc->hw->di[i]);
-
- /* wait for queue and DMA fifos to run dry */
- while (brcms_txpktpendtot(wlc) > 0) {
- brcms_msleep(wlc->wl, 1);
-
- if (--timeout == 0)
- break;
- }
+ dma_kick_tx(wlc->hw->di[i]);
- WARN_ON_ONCE(timeout == 0);
+ return !brcms_txpktpendtot(wlc);
}
void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 4fb2834f4e6..b0f14b7b861 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
extern void brcms_c_scan_start(struct brcms_c_info *wlc);
extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
- bool drop);
extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 050ce7c70d7..3630a41df50 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -572,26 +572,11 @@ il3945_tx_skb(struct il_priv *il,
il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
/* Total # bytes to be transmitted */
- len = (u16) skb->len;
- tx_cmd->len = cpu_to_le16(len);
+ tx_cmd->len = cpu_to_le16((u16) skb->len);
- il_update_stats(il, true, fc, len);
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
- D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
- il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
- ieee80211_hdrlen(fc));
-
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -610,14 +595,8 @@ il3945_tx_skb(struct il_priv *il,
* within command buffer array. */
txcmd_phys =
pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
- /* we do not map meta data ... so we can safely access address to
- * provide to unmap command*/
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, len);
-
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
+ goto drop_unlock;
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -626,10 +605,34 @@ il3945_tx_skb(struct il_priv *il,
phys_addr =
pci_map_single(il->pci_dev, skb->data + hdr_len, len,
PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
+ goto drop_unlock;
+ }
+
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, len);
+ if (len)
il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0,
U32_PAD(len));
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
}
+ il_update_stats(il, true, fc, skb->len);
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+ ieee80211_hdrlen(fc));
+
/* Tell device the write idx *just past* this latest filled TFD */
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
il_txq_update_write_ptr(il, txq);
@@ -1001,12 +1004,12 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
struct list_head *element;
struct il_rx_buf *rxb;
struct page *page;
+ dma_addr_t page_dma;
unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
spin_lock_irqsave(&rxq->lock, flags);
-
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
return;
@@ -1035,26 +1038,34 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
break;
}
+ /* Get physical address of RB/SKB */
+ page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+ __free_pages(page, il->hw_params.rx_page_order);
+ break;
+ }
+
spin_lock_irqsave(&rxq->lock, flags);
+
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
+ pci_unmap_page(il->pci_dev, page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
__free_pages(page, il->hw_params.rx_page_order);
return;
}
+
element = rxq->rx_used.next;
rxb = list_entry(element, struct il_rx_buf, list);
list_del(element);
- spin_unlock_irqrestore(&rxq->lock, flags);
rxb->page = page;
- /* Get physical address of RB/SKB */
- rxb->page_dma =
- pci_map_page(il->pci_dev, page, 0,
- PAGE_SIZE << il->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
-
- spin_lock_irqsave(&rxq->lock, flags);
-
+ rxb->page_dma = page_dma;
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
il->alloc_rxb_page++;
@@ -1284,8 +1295,15 @@ il3945_rx_handle(struct il_priv *il)
pci_map_page(il->pci_dev, rxb->page, 0,
PAGE_SIZE << il->hw_params.
rx_page_order, PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
+ if (unlikely(pci_dma_mapping_error(il->pci_dev,
+ rxb->page_dma))) {
+ __il_free_pages(il, rxb->page);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
} else
list_add_tail(&rxb->list, &rxq->rx_used);
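Editorial note: the rx and tx hunks above, and the matching 4965 changes below,
apply one hardening pattern: perform the DMA mapping outside the queue lock,
check it with pci_dma_mapping_error(), and free the page or drop the frame
before any queue state has been published. In isolation (sketch only; the
helper name and the `order` parameter are illustrative):

static int example_map_rx_page(struct il_priv *il, struct il_rx_buf *rxb,
			       struct page *page, unsigned int order)
{
	dma_addr_t page_dma;

	/* map first, with no locks held */
	page_dma = pci_map_page(il->pci_dev, page, 0,
				PAGE_SIZE << order, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
		__free_pages(page, order);
		return -ENOMEM;	/* caller drops or retries */
	}

	/* only a successfully mapped buffer is published to the queue */
	rxb->page = page;
	rxb->page_dma = page_dma;
	return 0;
}
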
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index f1dc0400656..7941eb3a016 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -319,6 +319,7 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
struct list_head *element;
struct il_rx_buf *rxb;
struct page *page;
+ dma_addr_t page_dma;
unsigned long flags;
gfp_t gfp_mask = priority;
@@ -356,33 +357,35 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
return;
}
+ /* Get physical address of the RB */
+ page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+ __free_pages(page, il->hw_params.rx_page_order);
+ break;
+ }
+
spin_lock_irqsave(&rxq->lock, flags);
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
+ pci_unmap_page(il->pci_dev, page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
__free_pages(page, il->hw_params.rx_page_order);
return;
}
+
element = rxq->rx_used.next;
rxb = list_entry(element, struct il_rx_buf, list);
list_del(element);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma =
- pci_map_page(il->pci_dev, page, 0,
- PAGE_SIZE << il->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
+ rxb->page = page;
+ rxb->page_dma = page_dma;
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
il->alloc_rxb_page++;
@@ -725,6 +728,16 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
+ /* We know which subframes of an A-MPDU belong
+ * together since we get a single PHY response
+ * from the firmware for all of them.
+ */
+
+ rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status.ampdu_reference = il->_4965.ampdu_ref;
+ }
+
il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
&rx_status);
}
@@ -736,6 +749,7 @@ il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
{
struct il_rx_pkt *pkt = rxb_addr(rxb);
il->_4965.last_phy_res_valid = true;
+ il->_4965.ampdu_ref++;
memcpy(&il->_4965.last_phy_res, pkt->u.raw,
sizeof(struct il_rx_phy_res));
}
@@ -1779,8 +1793,7 @@ il4965_tx_skb(struct il_priv *il,
memcpy(tx_cmd->hdr, hdr, hdr_len);
/* Total # bytes to be transmitted */
- len = (u16) skb->len;
- tx_cmd->len = cpu_to_le16(len);
+ tx_cmd->len = cpu_to_le16((u16) skb->len);
if (info->control.hw_key)
il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
@@ -1790,7 +1803,6 @@ il4965_tx_skb(struct il_priv *il,
il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
- il_update_stats(il, true, fc, len);
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -1812,18 +1824,8 @@ il4965_tx_skb(struct il_priv *il,
txcmd_phys =
pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
+ goto drop_unlock;
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -1832,8 +1834,24 @@ il4965_tx_skb(struct il_priv *il,
phys_addr =
pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
+ goto drop_unlock;
+ }
+
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+ if (secondlen)
il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
0, 0);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
}
scratch_phys =
@@ -1846,6 +1864,8 @@ il4965_tx_skb(struct il_priv *il,
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+ il_update_stats(il, true, fc, skb->len);
+
D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
@@ -4281,8 +4301,16 @@ il4965_rx_handle(struct il_priv *il)
pci_map_page(il->pci_dev, rxb->page, 0,
PAGE_SIZE << il->hw_params.
rx_page_order, PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
+
+ if (unlikely(pci_dma_mapping_error(il->pci_dev,
+ rxb->page_dma))) {
+ __il_free_pages(il, rxb->page);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
} else
list_add_tail(&rxb->list, &rxq->rx_used);
@@ -5711,7 +5739,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
/* Tell mac80211 our characteristics */
hw->flags =
IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
if (il->cfg->sku & IL_SKU_N)
@@ -6573,9 +6601,6 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_free_eeprom;
- if (err)
- goto out_free_eeprom;
-
/* extract MAC Address */
il4965_eeprom_get_mac(il, il->addresses[0].addr);
D_INFO("MAC address: %pM\n", il->addresses[0].addr);
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index f3b8e91aa3d..e8324b5e5bf 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -1183,8 +1183,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
- WLAN_HT_CAP_SM_PS_STATIC)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
index 5db11714e04..91eb2d07fdb 100644
--- a/drivers/net/wireless/iwlegacy/4965.c
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -1748,7 +1748,6 @@ static void
il4965_post_associate(struct il_priv *il)
{
struct ieee80211_vif *vif = il->vif;
- struct ieee80211_conf *conf = NULL;
int ret = 0;
if (!vif || !il->is_open)
@@ -1759,8 +1758,6 @@ il4965_post_associate(struct il_priv *il)
il_scan_cancel_timeout(il, 200);
- conf = &il->hw->conf;
-
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
il_commit_rxon(il);
diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 25dd7d28d02..3b6c9940089 100644
--- a/drivers/net/wireless/iwlegacy/commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -1134,8 +1134,9 @@ struct il_wep_cmd {
#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
-#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0x70
#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
+#define RX_RES_PHY_FLAGS_AGG_MSK cpu_to_le16(1 << 7)
#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 1f598604a79..e006ea83132 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1830,32 +1830,30 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
__le32 sta_flags;
- u8 mimo_ps_mode;
if (!sta || !sta_ht_inf->ht_supported)
goto done;
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
D_ASSOC("spatial multiplexing power save mode: %s\n",
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
+ (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
+ (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
"disabled");
sta_flags = il->stations[idx].sta.station_flags;
sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_STATIC:
sta_flags |= STA_FLG_MIMO_DIS_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ case IEEE80211_SMPS_DYNAMIC:
sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
+ case IEEE80211_SMPS_OFF:
break;
default:
- IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
break;
}
@@ -3162,18 +3160,23 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
idx, il->cmd_queue);
}
#endif
- txq->need_update = 1;
-
- if (il->ops->txq_update_byte_cnt_tbl)
- /* Set up entry in queue's byte count circular buffer */
- il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
phys_addr =
pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
+ idx = -ENOMEM;
+ goto out;
+ }
dma_unmap_addr_set(out_meta, mapping, phys_addr);
dma_unmap_len_set(out_meta, len, fix_size);
+ txq->need_update = 1;
+
+ if (il->ops->txq_update_byte_cnt_tbl)
+ /* Set up entry in queue's byte count circular buffer */
+ il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
+
il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
U32_PAD(cmd->len));
@@ -3181,6 +3184,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
il_txq_update_write_ptr(il, txq);
+out:
spin_unlock_irqrestore(&il->hcmd_lock, flags);
return idx;
}
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 37fe553b25e..96f2025d936 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1356,6 +1356,7 @@ struct il_priv {
struct {
struct il_rx_phy_res last_phy_res;
bool last_phy_res_valid;
+ u32 ampdu_ref;
struct completion firmware_loading_complete;
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 5cf43236421..ba319cba3f1 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -43,8 +43,20 @@ config IWLWIFI
module will be called iwlwifi.
config IWLDVM
- tristate "Intel Wireless WiFi"
+ tristate "Intel Wireless WiFi DVM Firmware support"
depends on IWLWIFI
+ help
+ This is the driver supporting the DVM firmware which is
+ currently the only firmware available for existing devices.
+
+config IWLMVM
+ tristate "Intel Wireless WiFi MVM Firmware support"
+ depends on IWLWIFI
+ help
+ This is the driver supporting the MVM firmware which is
+ currently only available for 7000 series devices.
+
+ Say yes if you have such a device.
menu "Debugging Options"
depends on IWLWIFI
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 170ec330d2a..6c7800044a0 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -5,8 +5,10 @@ iwlwifi-objs += iwl-drv.o
iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
+iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
+iwlwifi-objs += pcie/7000.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
@@ -15,5 +17,6 @@ ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
+obj-$(CONFIG_IWLMVM) += mvm/
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 33b3ad2e546..41ec27cb6ef 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -338,7 +338,7 @@ int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap);
+ struct ieee80211_sta *sta);
static inline int iwl_sta_id(struct ieee80211_sta *sta)
{
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index de54713b680..6468de8634b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index 2349f393cc4..65e920cab2b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 0ca99c13f7f..02c9ebb3b34 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -3897,6 +3897,24 @@ struct iwlagn_wowlan_kek_kck_material_cmd {
__le64 replay_ctr;
} __packed;
+#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
+
+/*
+ * REPLY_WOWLAN_GET_STATUS = 0xe5
+ */
+struct iwlagn_wowlan_status {
+ __le64 replay_ctr;
+ __le32 rekey_status;
+ __le32 wakeup_reason;
+ u8 pattern_number;
+ u8 reserved1;
+ __le16 qos_seq_ctr[8];
+ __le16 non_qos_seq_ctr;
+ __le16 reserved2;
+ union iwlagn_all_tsc_rsc tsc_rsc;
+ __le16 reserved3;
+} __packed;
+
/*
* REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
*/
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 72c74af3813..20806cae11b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 2653a891cc7..71ea77576d2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 8c72be3f37c..15cca2ef929 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
index 844a17f99a1..33c7e15d24f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/iwlwifi/dvm/led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index b02a853103d..8749dcfe695 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 6ff46605ad4..86ea5f4c393 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 0353e1c0670..323e4a33fca 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -145,14 +145,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_QUEUE_CONTROL |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_SCAN_WHILE_IDLE;
+ IEEE80211_HW_WANT_MONITOR_VIF;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
@@ -442,54 +441,154 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
return ret;
}
+struct iwl_resume_data {
+ struct iwl_priv *priv;
+ struct iwlagn_wowlan_status *cmd;
+ bool valid;
+};
+
+static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_resume_data *resume_data = data;
+ struct iwl_priv *priv = resume_data->priv;
+ u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+ if (len - 4 != sizeof(*resume_data->cmd)) {
+ IWL_ERR(priv, "rx wrong size data\n");
+ return true;
+ }
+ memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd));
+ resume_data->valid = true;
+
+ return true;
+}
+
static int iwlagn_mac_resume(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct ieee80211_vif *vif;
- unsigned long flags;
- u32 base, status = 0xffffffff;
- int ret = -EIO;
+ u32 base;
+ int ret;
+ enum iwl_d3_status d3_status;
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+ struct iwl_notification_wait status_wait;
+ static const u8 status_cmd[] = {
+ REPLY_WOWLAN_GET_STATUS,
+ };
+ struct iwlagn_wowlan_status status_data = {};
+ struct iwl_resume_data resume_data = {
+ .priv = priv,
+ .cmd = &status_data,
+ .valid = false,
+ };
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ const struct fw_img *img;
+#endif
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->mutex);
- iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+ /* we'll clear ctx->vif during iwlagn_prepare_restart() */
+ vif = ctx->vif;
+
+ ret = iwl_trans_d3_resume(priv->trans, &d3_status);
+ if (ret)
+ goto out_unlock;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(priv, "Device was reset during suspend\n");
+ goto out_unlock;
+ }
base = priv->device_pointers.error_event_table;
- if (iwlagn_hw_valid_rtc_data_addr(base)) {
- spin_lock_irqsave(&priv->trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(priv->trans, true)) {
- iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
- status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
- iwl_trans_release_nic_access(priv->trans);
- ret = 0;
+ if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+ IWL_WARN(priv, "Invalid error table during resume!\n");
+ goto out_unlock;
+ }
+
+ iwl_trans_read_mem_bytes(priv->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid) {
+ IWL_INFO(priv, "error table is valid (%d, 0x%x)\n",
+ err_info.valid, err_info.error_id);
+ if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ wakeup.rfkill_release = true;
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
}
- spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
+ goto out_unlock;
+ }
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (ret == 0) {
- const struct fw_img *img;
-
- img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
- if (!priv->wowlan_sram) {
- priv->wowlan_sram =
- kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
- GFP_KERNEL);
- }
+ img = &priv->fw->img[IWL_UCODE_WOWLAN];
+ if (!priv->wowlan_sram)
+ priv->wowlan_sram =
+ kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
+ GFP_KERNEL);
+
+ if (priv->wowlan_sram)
+ iwl_trans_read_mem(priv->trans, 0x800000,
+ priv->wowlan_sram,
+ img->sec[IWL_UCODE_SECTION_DATA].len / 4);
+#endif
+
+ /*
+ * This is very strange. The GET_STATUS command is sent but the device
+ * doesn't reply properly, it seems it doesn't close the RBD so one is
+ * always left open ... As a result, we need to send another command
+ * and have to reset the driver afterwards. As we need to switch to
+ * runtime firmware again that'll happen.
+ */
+
+ iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd,
+ ARRAY_SIZE(status_cmd), iwl_resume_status_fn,
+ &resume_data);
+
+ iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL);
+ iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL);
+ /* an RBD is left open in the firmware now! */
- if (priv->wowlan_sram)
- iwl_trans_read_mem(
- priv->trans, 0x800000,
- priv->wowlan_sram,
- img->sec[IWL_UCODE_SECTION_DATA].len / 4);
+ ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ/5);
+ if (ret)
+ goto out_unlock;
+
+ if (resume_data.valid && priv->contexts[IWL_RXON_CTX_BSS].vif) {
+ u32 reasons = le32_to_cpu(status_data.wakeup_reason);
+ struct cfg80211_wowlan_wakeup *wakeup_report;
+
+ IWL_INFO(priv, "WoWLAN wakeup reason(s): 0x%.8x\n", reasons);
+
+ if (reasons) {
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET)
+ wakeup.magic_pkt = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH)
+ wakeup.pattern_idx = status_data.pattern_number;
+ if (reasons & (IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+ IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE))
+ wakeup.disconnect = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL)
+ wakeup.gtk_rekey_failure = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ)
+ wakeup.eap_identity_req = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE)
+ wakeup.four_way_handshake = true;
+ wakeup_report = &wakeup;
+ } else {
+ wakeup_report = NULL;
}
-#endif
- }
- /* we'll clear ctx->vif during iwlagn_prepare_restart() */
- vif = ctx->vif;
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+ }
priv->wowlan = false;
@@ -499,6 +598,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
+ out_unlock:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1154,6 +1254,7 @@ static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
}
static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
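The reworked iwlagn_mac_resume() above translates the firmware's wakeup-reason bitmask into individual cfg80211 report flags, and skips the report entirely when the mask is zero. The sketch below models that decoding as stand-alone C; the bit values and structure fields are placeholders, not the driver's IWLAGN_WOWLAN_WAKEUP_* constants or the cfg80211 structure.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WAKEUP_MAGIC_PACKET  (1u << 0)   /* placeholder bit assignments */
    #define WAKEUP_PATTERN_MATCH (1u << 1)
    #define WAKEUP_DISCONNECT    (1u << 2)

    struct wakeup_report {
            bool magic_pkt;
            bool pattern;
            bool disconnect;
    };

    /* Returns true when there is something to report to the stack. */
    static bool decode_wakeup(uint32_t reasons, struct wakeup_report *r)
    {
            if (!reasons)
                    return false;

            r->magic_pkt  = reasons & WAKEUP_MAGIC_PACKET;
            r->pattern    = reasons & WAKEUP_PATTERN_MATCH;
            r->disconnect = reasons & WAKEUP_DISCONNECT;
            return true;
    }

    int main(void)
    {
            struct wakeup_report r = { 0 };

            if (decode_wakeup(WAKEUP_MAGIC_PACKET | WAKEUP_DISCONNECT, &r))
                    printf("magic=%d pattern=%d disconnect=%d\n",
                           r.magic_pkt, r.pattern, r.disconnect);
            return 0;
    }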
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index a64f361e341..b9e3517652d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -353,11 +353,8 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
/* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&priv->trans->reg_lock, reg_flags);
- if (!iwl_trans_grab_nic_access(priv->trans, false)) {
- spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
+ if (!iwl_trans_grab_nic_access(priv->trans, false, &reg_flags))
return;
- }
/* Set starting address; reads will auto-increment */
iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);
@@ -388,8 +385,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
}
}
/* Allow device to power down */
- iwl_trans_release_nic_access(priv->trans);
- spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
+ iwl_trans_release_nic_access(priv->trans, &reg_flags);
}
static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -1717,9 +1713,8 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
/* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&trans->reg_lock, reg_flags);
- if (!iwl_trans_grab_nic_access(trans, false))
- goto out_unlock;
+ if (!iwl_trans_grab_nic_access(trans, false, &reg_flags))
+ return pos;
/* Set starting address; reads will auto-increment */
iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
@@ -1757,9 +1752,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
}
/* Allow device to power down */
- iwl_trans_release_nic_access(trans);
-out_unlock:
- spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
+ iwl_trans_release_nic_access(trans, &reg_flags);
return pos;
}
@@ -1991,13 +1984,13 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
/* SKU Control */
- iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
- CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
- (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
- CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
- (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
- CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
+ iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
+ (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
+ (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
/* write radio config values to register */
if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
@@ -2009,10 +2002,11 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
priv->nvm_data->radio_cfg_dash <<
CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
- iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
- CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
- CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);
+ iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH,
+ reg_val);
IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
priv->nvm_data->radio_cfg_type,
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 518cf371580..bd69018d07a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
index a2cee7f0484..7b03e1342d4 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.h
+++ b/drivers/net/wireless/iwlwifi/dvm/power.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index f3dd0da60d8..abe30426726 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -411,8 +411,9 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
* BT traffic, as they would just be disrupted by BT.
*/
if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
- IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n",
- priv->bt_traffic_load);
+ IWL_DEBUG_COEX(priv,
+ "BT traffic (%d), no aggregation allowed\n",
+ priv->bt_traffic_load);
return ret;
}
@@ -1288,8 +1289,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
- == WLAN_HT_CAP_SM_PS_STATIC)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1304,7 +1304,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1344,8 +1344,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
- == WLAN_HT_CAP_SM_PS_STATIC)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1360,7 +1359,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
rate_mask = lq_sta->active_mimo3_rate;
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1409,7 +1408,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
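The rs.c hunks above replace manual extraction of the SM power-save field from the HT capability word with mac80211's pre-decoded smps_mode. The sketch below shows what the old extraction computed, so the equivalence is visible; the mask and shift follow the 802.11 HT capability layout, while the enum is a local stand-in rather than the driver's or mac80211's definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define HT_CAP_SM_PS_MASK  0x000C   /* bits 2-3 of the HT capability info */
    #define HT_CAP_SM_PS_SHIFT 2

    /* Field values as carried in the HT capability element. */
    enum ht_sm_ps { HT_SM_PS_STATIC = 0, HT_SM_PS_DYNAMIC = 1, HT_SM_PS_DISABLED = 3 };

    static enum ht_sm_ps sm_ps_from_ht_cap(uint16_t ht_cap)
    {
            return (enum ht_sm_ps)((ht_cap & HT_CAP_SM_PS_MASK) >> HT_CAP_SM_PS_SHIFT);
    }

    int main(void)
    {
            uint16_t cap = 0x0000;  /* SM PS field == 0 -> static, i.e. no MIMO */

            printf("static? %d\n", sm_ps_from_ht_cap(cap) == HT_SM_PS_STATIC);
            return 0;
    }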
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index ad3aea8f626..5d83cab22d6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index cac4f37cc42..a4eed2055fd 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -790,7 +790,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx(priv->hw, skb);
+ ieee80211_rx_ni(priv->hw, skb);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 9a891e6e60e..23be948cf16 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -1545,10 +1545,9 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid);
}
- if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
- priv->beacon_ctx) {
+ if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
if (iwlagn_update_beacon(priv, vif))
- IWL_ERR(priv, "Error sending IBSS beacon\n");
+ IWL_ERR(priv, "Error updating beacon\n");
}
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 610ed2204e1..3a4aa5239c4 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index bdba9543c35..94ef33838bc 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -77,7 +77,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
sta_id);
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
switch (add_sta_resp->status) {
case ADD_STA_SUCCESS_MSK:
@@ -119,7 +119,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return ret;
}
@@ -173,7 +173,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap)
+ struct ieee80211_sta *sta)
{
if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
return false;
@@ -183,20 +183,11 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
return false;
#endif
- /*
- * Remainder of this function checks ht_cap, but if it's
- * NULL then we can do HT40 (special case for RXON)
- */
- if (!ht_cap)
+ /* special case for RXON */
+ if (!sta)
return true;
- if (!ht_cap->ht_supported)
- return false;
-
- if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
- return false;
-
- return true;
+ return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
@@ -205,7 +196,6 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
__le32 *flags, __le32 *mask)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
- u8 mimo_ps_mode;
*mask = STA_FLG_RTS_MIMO_PROT_MSK |
STA_FLG_MIMO_DIS_MSK |
@@ -217,26 +207,24 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
if (!sta || !sta_ht_inf->ht_supported)
return;
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
-
IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
sta->addr,
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+ (sta->smps_mode == IEEE80211_SMPS_STATIC) ?
"static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+ (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ?
"dynamic" : "disabled");
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_STATIC:
*flags |= STA_FLG_MIMO_DIS_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ case IEEE80211_SMPS_DYNAMIC:
*flags |= STA_FLG_RTS_MIMO_PROT_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
+ case IEEE80211_SMPS_OFF:
break;
default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode);
break;
}
@@ -246,7 +234,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
*flags |= cpu_to_le32(
(u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
*flags |= STA_FLG_HT40_EN_MSK;
}
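The iwl_is_ht40_tx_allowed() change above stops testing HT capability bits and instead compares the station's current bandwidth against a threshold, which works because the bandwidth enumeration is ordered from narrow to wide. A minimal stand-alone model of that comparison, with a local enum rather than mac80211's:

    #include <stdbool.h>
    #include <stdio.h>

    enum sta_rx_bw { STA_RX_BW_20, STA_RX_BW_40, STA_RX_BW_80, STA_RX_BW_160 };

    static bool ht40_allowed(enum sta_rx_bw bw)
    {
            return bw >= STA_RX_BW_40;  /* 40, 80 and 160 MHz all permit HT40 frames */
    }

    int main(void)
    {
            printf("20MHz: %d, 80MHz: %d\n",
                   ht40_allowed(STA_RX_BW_20), ht40_allowed(STA_RX_BW_80));
            return 0;
    }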
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
index 57b918ce3b5..dc6f965a123 100644
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index b28cfc8553d..67e2e1321b4 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -185,10 +185,8 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
priv->thermal_throttle.ct_kill_toggle = true;
}
iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
- spin_lock_irqsave(&priv->trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(priv->trans, false))
- iwl_trans_release_nic_access(priv->trans);
- spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
+ if (iwl_trans_grab_nic_access(priv->trans, false, &flags))
+ iwl_trans_release_nic_access(priv->trans, &flags);
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
index 44c7c8f30a2..9356c4b908c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 6b01fc19594..d1dccb36139 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -1117,7 +1117,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
IWLAGN_TX_RES_RA_POS;
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
if (is_agg)
iwl_rx_reply_tx_agg(priv, tx_resp);
@@ -1145,6 +1145,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
next_reclaimed = ssn;
}
+ if (tid != IWL_TID_NON_QOS) {
+ priv->tid_data[sta_id][tid].next_reclaimed =
+ next_reclaimed;
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+ next_reclaimed);
+ }
+
iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1195,16 +1202,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
- /*
- * W/A for FW bug - the seq_ctl isn't updated when the
- * queues are flushed. Fetch it from the packet itself
- */
- if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
- next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
- next_reclaimed =
- SEQ_TO_SN(next_reclaimed + 0x10);
- }
-
is_offchannel_skb =
(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
freed++;
@@ -1239,11 +1236,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
le16_to_cpu(tx_resp->seq_ctl));
iwl_check_abort_status(priv, tx_resp->frame_count, status);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
while (!skb_queue_empty(&skbs)) {
skb = __skb_dequeue(&skbs);
- ieee80211_tx_status(priv->hw, skb);
+ ieee80211_tx_status_ni(priv->hw, skb);
}
if (is_offchannel_skb)
@@ -1290,12 +1287,12 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
tid = ba_resp->tid;
agg = &priv->tid_data[sta_id][tid].agg;
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
if (unlikely(!agg->wait_for_ba)) {
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -1309,7 +1306,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
IWL_DEBUG_TX_QUEUES(priv,
"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
scd_flow, sta_id, tid, agg->txq_id);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -1378,11 +1375,11 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
}
}
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status(priv->hw, skb);
+ ieee80211_tx_status_ni(priv->hw, skb);
}
return 0;
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index ebec13a3329..736fe9bb140 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 7960a52f6ad..e9975c54c27 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 864219d2136..743b4834335 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_6030,
IWL_DEVICE_FAMILY_6050,
IWL_DEVICE_FAMILY_6150,
+ IWL_DEVICE_FAMILY_7000,
};
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index b419a1efac0..df3463a3870 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 42b20b0e83b..8cf5db7fb5c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -116,6 +116,7 @@ do { \
#define IWL_DL_HCMD 0x00000004
#define IWL_DL_STATE 0x00000008
/* 0x000000F0 - 0x00000010 */
+#define IWL_DL_TE 0x00000020
#define IWL_DL_EEPROM 0x00000040
#define IWL_DL_RADIO 0x00000080
/* 0x00000F00 - 0x00000100 */
@@ -156,6 +157,7 @@ do { \
#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
+#define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a)
#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 70191ddbd8f..8f61c717f61 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index dc7e26b2f38..9a0f45ec9e0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index d3549f493a1..6f228bb2b84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -139,8 +139,10 @@ struct iwl_drv {
#endif
};
-#define DVM_OP_MODE 0
-#define MVM_OP_MODE 1
+enum {
+ DVM_OP_MODE = 0,
+ MVM_OP_MODE = 1,
+};
/* Protects the table contents, i.e. the ops pointer & drv list */
static struct mutex iwlwifi_opmode_table_mtx;
@@ -149,8 +151,8 @@ static struct iwlwifi_opmode_table {
const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
struct list_head drv; /* list of devices using this op_mode */
} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
- { .name = "iwldvm", .ops = NULL },
- { .name = "iwlmvm", .ops = NULL },
+ [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
+ [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
};
/*
@@ -268,7 +270,7 @@ struct fw_sec_parsing {
*/
struct iwl_tlv_calib_data {
__le32 ucode_type;
- __le64 calib;
+ struct iwl_tlv_calib_ctrl calib;
} __packed;
struct iwl_firmware_pieces {
@@ -358,7 +360,11 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
ucode_type);
return -EINVAL;
}
- drv->fw.default_calib[ucode_type] = le64_to_cpu(def_calib->calib);
+ drv->fw.default_calib[ucode_type].flow_trigger =
+ def_calib->calib.flow_trigger;
+ drv->fw.default_calib[ucode_type].event_trigger =
+ def_calib->calib.event_trigger;
+
return 0;
}
@@ -959,7 +965,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
release_firmware(ucode_raw);
mutex_lock(&iwlwifi_opmode_table_mtx);
- op = &iwlwifi_opmode_table[DVM_OP_MODE];
+ if (fw->mvm_fw)
+ op = &iwlwifi_opmode_table[MVM_OP_MODE];
+ else
+ op = &iwlwifi_opmode_table[DVM_OP_MODE];
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
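The iwl-drv.c hunks above tie the op-mode table entries to the DVM/MVM enumerators with designated initializers, so reordering the enum can no longer silently desynchronize a name from its index, and the firmware's mvm_fw flag then selects the entry. A minimal stand-alone illustration of that pattern:

    #include <stdio.h>

    enum { DVM_OP_MODE, MVM_OP_MODE, NUM_OP_MODES };

    struct opmode_entry {
            const char *name;
    };

    /* Entries are keyed by the enumerators, not by declaration order. */
    static const struct opmode_entry opmode_table[NUM_OP_MODES] = {
            [DVM_OP_MODE] = { .name = "iwldvm" },
            [MVM_OP_MODE] = { .name = "iwlmvm" },
    };

    int main(void)
    {
            int fw_is_mvm = 1;      /* would come from the parsed firmware image */
            int idx = fw_is_mvm ? MVM_OP_MODE : DVM_OP_MODE;

            printf("selected op_mode: %s\n", opmode_table[idx].name);
            return 0;
    }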
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 285de5f68c0..594a5c71b27 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,7 @@
/* for all modules */
#define DRV_NAME "iwlwifi"
#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2013 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 471986690cf..034f2ff4f43 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -703,9 +703,9 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
return n_channels;
}
-static int iwl_init_sband_channels(struct iwl_nvm_data *data,
- struct ieee80211_supported_band *sband,
- int n_channels, enum ieee80211_band band)
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum ieee80211_band band)
{
struct ieee80211_channel *chan = &data->channels[0];
int n = 0, idx = 0;
@@ -728,10 +728,10 @@ static int iwl_init_sband_channels(struct iwl_nvm_data *data,
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data,
- struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
+void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
{
int max_bit_rate = 0;
u8 rx_chains;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 555f0eb61d4..683fe6a8c58 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -126,4 +126,13 @@ static inline void iwl_free_nvm_data(struct iwl_nvm_data *data)
int iwl_nvm_check_version(struct iwl_nvm_data *data,
struct iwl_trans *trans);
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum ieee80211_band band);
+
+void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band);
+
#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
index 27c7da3c6ed..ef4806f27cf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
index 1337c9d36fe..b2588c5cbf9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index c646a90b725..f5592fb3b1e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -414,6 +414,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
* uCode/driver must write "1" in order to clear this flag
*/
#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
+#define FH_TSSR_TX_MSG_CONFIG_REG (FH_TSSR_LOWER_BOUND + 0x008)
#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index e71564053e7..90873eca35f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 715291eb7f8..b545178e46e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -139,6 +139,19 @@ struct fw_img {
#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwl_tlv_calib_ctrl {
+ __le32 flow_trigger;
+ __le32 event_trigger;
+} __packed;
+
/**
* struct iwl_fw - variables associated with the firmware
*
@@ -153,6 +166,7 @@ struct fw_img {
* @inst_evtlog_ptr: event log offset for runtime ucode.
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offset for runtime ucode.
+ * @mvm_fw: indicates this is MVM firmware
*/
struct iwl_fw {
u32 ucode_ver;
@@ -168,7 +182,7 @@ struct iwl_fw {
u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
- u64 default_calib[IWL_UCODE_TYPE_MAX];
+ struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
u32 phy_config;
bool mvm_fw;
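The iwl-fw.h change above replaces the single __le64 default-calibration word with a structure of two little-endian 32-bit trigger bitmaps, which the TLV parser then copies field by field. The sketch below is a stand-alone model of that layout, not the driver type; the packed attribute mirrors the __packed annotation.

    #include <stdint.h>
    #include <stdio.h>

    struct calib_ctrl {
            uint32_t flow_trigger;   /* calibrations run on flow triggers */
            uint32_t event_trigger;  /* calibrations run on event triggers */
    } __attribute__((packed));

    int main(void)
    {
            struct calib_ctrl def = { .flow_trigger = 0x3, .event_trigger = 0x1 };

            printf("size=%zu flow=0x%x event=0x%x\n",
                   sizeof(def), def.flow_trigger, def.event_trigger);
            return 0;
    }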
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index bff3ac96c00..276410d82de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -35,54 +35,6 @@
#define IWL_POLL_INTERVAL 10 /* microseconds */
-void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- iwl_write32(trans, reg, iwl_read32(trans, reg) | mask);
-}
-
-void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask);
-}
-
-void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- __iwl_set_bit(trans, reg, mask);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_set_bit);
-
-void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- __iwl_clear_bit(trans, reg, mask);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_clear_bit);
-
-void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
-{
- unsigned long flags;
- u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- WARN_ON_ONCE(value & ~mask);
-#endif
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- v = iwl_read32(trans, reg);
- v &= ~mask;
- v |= value;
- iwl_write32(trans, reg, v);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_set_bits_mask);
-
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
@@ -103,13 +55,10 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
u32 value = 0x5a5a5a5a;
unsigned long flags;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(trans, false)) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
value = iwl_read32(trans, reg);
- iwl_trans_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
return value;
}
@@ -119,12 +68,10 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(trans, false)) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
iwl_write32(trans, reg, value);
- iwl_trans_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_write_direct32);
@@ -162,12 +109,10 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
unsigned long flags;
u32 val = 0x5a5a5a5a;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(trans, false)) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
val = __iwl_read_prph(trans, ofs);
- iwl_trans_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
return val;
}
EXPORT_SYMBOL_GPL(iwl_read_prph);
@@ -176,12 +121,10 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(trans, false)) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs, val);
- iwl_trans_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_write_prph);
@@ -189,13 +132,11 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(trans, false)) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs,
__iwl_read_prph(trans, ofs) | mask);
- iwl_trans_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
@@ -204,13 +145,11 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(trans, false)) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs,
(__iwl_read_prph(trans, ofs) & mask) | bits);
- iwl_trans_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
@@ -219,12 +158,10 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
unsigned long flags;
u32 val;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(trans, false)) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
val = __iwl_read_prph(trans, ofs);
__iwl_write_prph(trans, ofs, (val & ~mask));
- iwl_trans_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index dc478068596..fd9f5b97fff 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -51,12 +51,15 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
return val;
}
-void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
-void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
-void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
-void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+{
+ iwl_trans_set_bits_mask(trans, reg, mask, mask);
+}
-void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
+static inline void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+{
+ iwl_trans_set_bits_mask(trans, reg, mask, 0);
+}
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index d9a86d6b2bd..e5e3a79eae2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index c61f2070f15..c3affbc62cd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 821523100cf..c2ce764463a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
new file mode 100644
index 00000000000..a70213bdb83
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -0,0 +1,346 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "iwl-modparams.h"
+#include "iwl-nvm-parse.h"
+
+/* NVM offsets (in words) definitions */
+enum wkp_nvm_offsets {
+ /* NVM HW-Section offset (in words) definitions */
+ HW_ADDR = 0x15,
+
+	/* NVM SW-Section offset (in words) definitions */
+ NVM_SW_SECTION = 0x1C0,
+ NVM_VERSION = 0,
+ RADIO_CFG = 1,
+ SKU = 2,
+ N_HW_ADDRS = 3,
+ NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
+
+	/* NVM calibration section offset (in words) definitions */
+ NVM_CALIB_SECTION = 0x2B8,
+ XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
+};
+
+/* SKU Capabilities (actual values from NVM definition) */
+enum nvm_sku_bits {
+ NVM_SKU_CAP_BAND_24GHZ = BIT(0),
+ NVM_SKU_CAP_BAND_52GHZ = BIT(1),
+ NVM_SKU_CAP_11N_ENABLE = BIT(2),
+};
+
+/* radio config bits (actual values from NVM definition) */
+#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
+#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+/*
+ * These are the channel numbers in the order that they are stored in the NVM
+ */
+static const u8 iwl_nvm_channels[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ /* 5 GHz */
+ 36, 40, 44, 48, 52, 56, 60, 64,
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165
+};
+
+#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
+#define NUM_2GHZ_CHANNELS 14
+#define FIRST_2GHZ_HT_MINUS 5
+#define LAST_2GHZ_HT_PLUS 9
+#define LAST_5GHZ_HT 161
+
+
+/* rate data (static) */
+static struct ieee80211_rate iwl_cfg80211_rates[] = {
+ { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
+ { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
+ { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
+ { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
+ { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
+ { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
+ { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
+ { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
+ { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
+};
+#define RATES_24_OFFS 0
+#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
+#define RATES_52_OFFS 4
+#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
+
+/**
+ * enum iwl_nvm_channel_flags - channel flags in NVM
+ * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @NVM_CHANNEL_IBSS: usable as an IBSS channel
+ * @NVM_CHANNEL_ACTIVE: active scanning allowed
+ * @NVM_CHANNEL_RADAR: radar detection required
+ * @NVM_CHANNEL_DFS: dynamic freq selection candidate
+ * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
+ * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
+ */
+enum iwl_nvm_channel_flags {
+ NVM_CHANNEL_VALID = BIT(0),
+ NVM_CHANNEL_IBSS = BIT(1),
+ NVM_CHANNEL_ACTIVE = BIT(3),
+ NVM_CHANNEL_RADAR = BIT(4),
+ NVM_CHANNEL_DFS = BIT(7),
+ NVM_CHANNEL_WIDE = BIT(8),
+ NVM_CHANNEL_40MHZ = BIT(9),
+};
+
+#define CHECK_AND_PRINT_I(x) \
+ ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
+
+static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ const __le16 * const nvm_ch_flags)
+{
+ int ch_idx;
+ int n_channels = 0;
+ struct ieee80211_channel *channel;
+ u16 ch_flags;
+ bool is_5ghz;
+
+ for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
+ ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+ if (!(ch_flags & NVM_CHANNEL_VALID)) {
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d Flags %x [%sGHz] - No traffic\n",
+ iwl_nvm_channels[ch_idx],
+ ch_flags,
+ (ch_idx >= NUM_2GHZ_CHANNELS) ?
+ "5.2" : "2.4");
+ continue;
+ }
+
+ channel = &data->channels[n_channels];
+ n_channels++;
+
+ channel->hw_value = iwl_nvm_channels[ch_idx];
+ channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ channel->center_freq =
+ ieee80211_channel_to_frequency(
+ channel->hw_value, channel->band);
+
+ /* TODO: this should be derived from the NVM */
+ channel->flags = IEEE80211_CHAN_NO_HT40;
+ if (ch_idx < NUM_2GHZ_CHANNELS &&
+ (ch_flags & NVM_CHANNEL_40MHZ)) {
+ if (iwl_nvm_channels[ch_idx] <= LAST_2GHZ_HT_PLUS)
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+ if (iwl_nvm_channels[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+ } else if (iwl_nvm_channels[ch_idx] <= LAST_5GHZ_HT &&
+ (ch_flags & NVM_CHANNEL_40MHZ)) {
+ if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+ else
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+ }
+
+ if (!(ch_flags & NVM_CHANNEL_IBSS))
+ channel->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(ch_flags & NVM_CHANNEL_ACTIVE))
+ channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (ch_flags & NVM_CHANNEL_RADAR)
+ channel->flags |= IEEE80211_CHAN_RADAR;
+
+ /* Initialize regulatory-based run-time data */
+
+ /* TODO: read the real value from the NVM */
+ channel->max_power = 0;
+ is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+ channel->hw_value,
+ is_5ghz ? "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ ch_flags,
+ channel->max_power,
+ ((ch_flags & NVM_CHANNEL_IBSS) &&
+ !(ch_flags & NVM_CHANNEL_RADAR))
+ ? "" : "not ");
+ }
+
+ return n_channels;
+}
+
+static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data, const __le16 *nvm_sw)
+{
+ int n_channels = iwl_init_channel_map(dev, cfg, data,
+ &nvm_sw[NVM_CHANNELS]);
+ int n_used = 0;
+ struct ieee80211_supported_band *sband;
+
+ sband = &data->bands[IEEE80211_BAND_2GHZ];
+ sband->band = IEEE80211_BAND_2GHZ;
+ sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+ sband->n_bitrates = N_RATES_24;
+ n_used += iwl_init_sband_channels(data, sband, n_channels,
+ IEEE80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+ sband = &data->bands[IEEE80211_BAND_5GHZ];
+ sband->band = IEEE80211_BAND_5GHZ;
+ sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
+ sband->n_bitrates = N_RATES_52;
+ n_used += iwl_init_sband_channels(data, sband, n_channels,
+ IEEE80211_BAND_5GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+ if (n_channels != n_used)
+ IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
+ n_used, n_channels);
+}
+
+struct iwl_nvm_data *
+iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+ const __le16 *nvm_hw, const __le16 *nvm_sw,
+ const __le16 *nvm_calib)
+{
+ struct iwl_nvm_data *data;
+ u8 hw_addr[ETH_ALEN];
+ u16 radio_cfg, sku;
+
+ data = kzalloc(sizeof(*data) +
+ sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
+ GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->nvm_version = le16_to_cpup(nvm_sw + NVM_VERSION);
+
+ radio_cfg = le16_to_cpup(nvm_sw + RADIO_CFG);
+ data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
+ data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
+ data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
+ data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
+ data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
+
+ sku = le16_to_cpup(nvm_sw + SKU);
+ data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
+ data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
+ data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+ data->sku_cap_11n_enable = false;
+
+ /* check overrides (some devices have wrong NVM) */
+ if (cfg->valid_tx_ant)
+ data->valid_tx_ant = cfg->valid_tx_ant;
+ if (cfg->valid_rx_ant)
+ data->valid_rx_ant = cfg->valid_rx_ant;
+
+ if (!data->valid_tx_ant || !data->valid_rx_ant) {
+ IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
+ data->valid_tx_ant, data->valid_rx_ant);
+ kfree(data);
+ return NULL;
+ }
+
+ data->n_hw_addrs = le16_to_cpup(nvm_sw + N_HW_ADDRS);
+
+ data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
+ data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
+
+ /* The address is stored as little-endian 16-bit words, so the bytes come out in 2-1-4-3-6-5 order and must be swapped pairwise */
+ memcpy(hw_addr, nvm_hw + HW_ADDR, ETH_ALEN);
+ data->hw_addr[0] = hw_addr[1];
+ data->hw_addr[1] = hw_addr[0];
+ data->hw_addr[2] = hw_addr[3];
+ data->hw_addr[3] = hw_addr[2];
+ data->hw_addr[4] = hw_addr[5];
+ data->hw_addr[5] = hw_addr[4];
+
+ iwl_init_sbands(dev, cfg, data, nvm_sw);
+
+ data->calib_version = 255; /* TODO:
+ this value prevents some checks from
+ failing; verify whether this field is
+ still needed and, if so, where it
+ lives in the NVM */
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(iwl_parse_nvm_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
new file mode 100644
index 00000000000..b2692bd287f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_nvm_parse_h__
+#define __iwl_nvm_parse_h__
+
+#include "iwl-eeprom-parse.h"
+
+/**
+ * iwl_parse_nvm_data - parse NVM data and return values
+ *
+ * This function parses all NVM values we need and then
+ * returns a (newly allocated) struct containing all the
+ * relevant values for driver use. The struct must be freed
+ * later with iwl_free_nvm_data().
+ */
+struct iwl_nvm_data *
+iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+ const __le16 *nvm_hw, const __le16 *nvm_sw,
+ const __le16 *nvm_calib);
+
+#endif /* __iwl_nvm_parse_h__ */
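
For orientation, here is a minimal usage sketch of the new parser, assuming the caller has already read the HW, SW and calibration NVM sections into little-endian word buffers; the wrapper name and the error handling are illustrative only, and the returned struct must still be released with the free helper referenced in the kernel-doc above.

#include "iwl-nvm-parse.h"

/* Hypothetical wrapper -- only iwl_parse_nvm_data() comes from this patch. */
static struct iwl_nvm_data *
example_build_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
		       const __le16 *nvm_hw, const __le16 *nvm_sw,
		       const __le16 *nvm_calib)
{
	struct iwl_nvm_data *data;

	data = iwl_parse_nvm_data(dev, cfg, nvm_hw, nvm_sw, nvm_calib);
	if (!data)	/* invalid antenna config or allocation failure */
		return NULL;

	/* data->hw_addr, data->bands[] and the sku_cap_* flags are now valid */
	return data;
}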
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index c8d9b951746..4a680019e11 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,6 +63,8 @@
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__
+#include <linux/debugfs.h>
+
struct iwl_op_mode;
struct iwl_trans;
struct sk_buff;
@@ -111,13 +113,13 @@ struct iwl_cfg;
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD this Rx responds to.
- * Must be atomic and called with BH disabled.
+ * This callback may sleep, it is called from a threaded IRQ handler.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
* @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
- * the radio is killed. Must be atomic.
+ * the radio is killed. May sleep.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
@@ -128,8 +130,7 @@ struct iwl_cfg;
* called with BH disabled.
* @nic_config: configure NIC, called before firmware is started.
* May sleep
- * @wimax_active: invoked when WiMax becomes active. Must be atomic and called
- * with BH disabled.
+ * @wimax_active: invoked when WiMax becomes active. May sleep
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -176,6 +177,7 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
+ might_sleep();
return op_mode->ops->rx(op_mode, rxb, cmd);
}
@@ -194,6 +196,7 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
bool state)
{
+ might_sleep();
op_mode->ops->hw_rf_kill(op_mode, state);
}
@@ -221,6 +224,7 @@ static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
+ might_sleep();
op_mode->ops->wimax_active(op_mode);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
new file mode 100644
index 00000000000..14fc8d39fc2
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -0,0 +1,514 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/export.h>
+
+#include "iwl-phy-db.h"
+#include "iwl-debug.h"
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+
+#define CHANNEL_NUM_SIZE 4 /* size of the channel-count field in calib_ch */
+#define IWL_NUM_PAPD_CH_GROUPS 4
+#define IWL_NUM_TXP_CH_GROUPS 9
+
+struct iwl_phy_db_entry {
+ u16 size;
+ u8 *data;
+};
+
+/**
+ * struct iwl_phy_db - stores phy configuration and calibration data.
+ *
+ * @cfg: phy configuration.
+ * @calib_nch: non channel specific calibration data.
+ * @calib_ch: channel specific calibration data.
+ * @calib_ch_group_papd: calibration data related to papd channel group.
+ * @calib_ch_group_txp: calibration data related to tx power channel group.
+ */
+struct iwl_phy_db {
+ struct iwl_phy_db_entry cfg;
+ struct iwl_phy_db_entry calib_nch;
+ struct iwl_phy_db_entry calib_ch;
+ struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
+ struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
+
+ u32 channel_num;
+ u32 channel_size;
+
+ struct iwl_trans *trans;
+};
+
+enum iwl_phy_db_section_type {
+ IWL_PHY_DB_CFG = 1,
+ IWL_PHY_DB_CALIB_NCH,
+ IWL_PHY_DB_CALIB_CH,
+ IWL_PHY_DB_CALIB_CHG_PAPD,
+ IWL_PHY_DB_CALIB_CHG_TXP,
+ IWL_PHY_DB_MAX
+};
+
+#define PHY_DB_CMD 0x6c /* TEMP API - the actual value is 0x8c */
+
+/*
+ * phy db - configure operational ucode
+ */
+struct iwl_phy_db_cmd {
+ __le16 type;
+ __le16 length;
+ u8 data[];
+} __packed;
+
+/* for parsing tx power channel group data that comes from the firmware */
+struct iwl_phy_db_chg_txp {
+ __le32 space;
+ __le16 max_channel_idx;
+} __packed;
+
+/*
+ * phy db - Receive phy db chunk after calibrations
+ */
+struct iwl_calib_res_notif_phy_db {
+ __le16 type;
+ __le16 length;
+ u8 data[];
+} __packed;
+
+#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587)
+static inline void iwl_phy_db_test_pic(__le32 pic)
+{
+ WARN_ON(IWL_PHY_DB_STATIC_PIC != pic);
+}
+
+struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
+{
+ struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
+ GFP_KERNEL);
+
+ if (!phy_db)
+ return phy_db;
+
+ phy_db->trans = trans;
+
+ /* TODO: add default values of the phy db. */
+ return phy_db;
+}
+EXPORT_SYMBOL(iwl_phy_db_init);
+
+/*
+ * get phy db section: returns a pointer to a phy db section specified by
+ * type and channel group id.
+ */
+static struct iwl_phy_db_entry *
+iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
+ enum iwl_phy_db_section_type type,
+ u16 chg_id)
+{
+ if (!phy_db || type >= IWL_PHY_DB_MAX)
+ return NULL;
+
+ switch (type) {
+ case IWL_PHY_DB_CFG:
+ return &phy_db->cfg;
+ case IWL_PHY_DB_CALIB_NCH:
+ return &phy_db->calib_nch;
+ case IWL_PHY_DB_CALIB_CH:
+ return &phy_db->calib_ch;
+ case IWL_PHY_DB_CALIB_CHG_PAPD:
+ if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
+ return NULL;
+ return &phy_db->calib_ch_group_papd[chg_id];
+ case IWL_PHY_DB_CALIB_CHG_TXP:
+ if (chg_id >= IWL_NUM_TXP_CH_GROUPS)
+ return NULL;
+ return &phy_db->calib_ch_group_txp[chg_id];
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
+ enum iwl_phy_db_section_type type,
+ u16 chg_id)
+{
+ struct iwl_phy_db_entry *entry =
+ iwl_phy_db_get_section(phy_db, type, chg_id);
+ if (!entry)
+ return;
+
+ kfree(entry->data);
+ entry->data = NULL;
+ entry->size = 0;
+}
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db)
+{
+ int i;
+
+ if (!phy_db)
+ return;
+
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
+ for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
+ for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
+
+ kfree(phy_db);
+}
+EXPORT_SYMBOL(iwl_phy_db_free);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
+ gfp_t alloc_ctx)
+{
+ struct iwl_calib_res_notif_phy_db *phy_db_notif =
+ (struct iwl_calib_res_notif_phy_db *)pkt->data;
+ enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type);
+ u16 size = le16_to_cpu(phy_db_notif->length);
+ struct iwl_phy_db_entry *entry;
+ u16 chg_id = 0;
+
+ if (!phy_db)
+ return -EINVAL;
+
+ if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
+ type == IWL_PHY_DB_CALIB_CHG_TXP)
+ chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
+
+ entry = iwl_phy_db_get_section(phy_db, type, chg_id);
+ if (!entry)
+ return -EINVAL;
+
+ kfree(entry->data);
+ entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx);
+ if (!entry->data) {
+ entry->size = 0;
+ return -ENOMEM;
+ }
+
+ entry->size = size;
+
+ if (type == IWL_PHY_DB_CALIB_CH) {
+ phy_db->channel_num =
+ le32_to_cpup((__le32 *)phy_db_notif->data);
+ phy_db->channel_size =
+ (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
+ }
+
+ /* Test PIC */
+ if (type != IWL_PHY_DB_CFG)
+ iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) +
+ (size / sizeof(__le32)) - 1));
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
+ __func__, __LINE__, type, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_phy_db_set_section);
+
+static int is_valid_channel(u16 ch_id)
+{
+ if (ch_id <= 14 ||
+ (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
+ (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
+ (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
+ return 1;
+ return 0;
+}
+
+static u8 ch_id_to_ch_index(u16 ch_id)
+{
+ if (WARN_ON(!is_valid_channel(ch_id)))
+ return 0xff;
+
+ if (ch_id <= 14)
+ return ch_id - 1;
+ if (ch_id <= 64)
+ return (ch_id + 20) / 4;
+ if (ch_id <= 140)
+ return (ch_id - 12) / 4;
+ return (ch_id - 13) / 4;
+}
+
+
+static u16 channel_id_to_papd(u16 ch_id)
+{
+ if (WARN_ON(!is_valid_channel(ch_id)))
+ return 0xff;
+
+ if (1 <= ch_id && ch_id <= 14)
+ return 0;
+ if (36 <= ch_id && ch_id <= 64)
+ return 1;
+ if (100 <= ch_id && ch_id <= 140)
+ return 2;
+ return 3;
+}
+
+static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
+{
+ struct iwl_phy_db_chg_txp *txp_chg;
+ int i;
+ u8 ch_index = ch_id_to_ch_index(ch_id);
+ if (ch_index == 0xff)
+ return 0xff;
+
+ for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
+ txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
+ if (!txp_chg)
+ return 0xff;
+ /*
+ * Look for the first channel group whose max channel is
+ * higher than the wanted channel.
+ */
+ if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
+ return i;
+ }
+ return 0xff;
+}
+static
+int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
+ u32 type, u8 **data, u16 *size, u16 ch_id)
+{
+ struct iwl_phy_db_entry *entry;
+ u32 channel_num;
+ u32 channel_size;
+ u16 ch_group_id = 0;
+ u16 index;
+
+ if (!phy_db)
+ return -EINVAL;
+
+ /* find wanted channel group */
+ if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
+ ch_group_id = channel_id_to_papd(ch_id);
+ else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
+ ch_group_id = channel_id_to_txp(phy_db, ch_id);
+
+ entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
+ if (!entry)
+ return -EINVAL;
+
+ if (type == IWL_PHY_DB_CALIB_CH) {
+ index = ch_id_to_ch_index(ch_id);
+ channel_num = phy_db->channel_num;
+ channel_size = phy_db->channel_size;
+ if (index >= channel_num) {
+ IWL_ERR(phy_db->trans, "Wrong channel number %d\n",
+ ch_id);
+ return -EINVAL;
+ }
+ *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
+ *size = channel_size;
+ } else {
+ *data = entry->data;
+ *size = entry->size;
+ }
+
+ /* Test PIC */
+ if (type != IWL_PHY_DB_CFG)
+ iwl_phy_db_test_pic(*(((__le32 *)*data) +
+ (*size / sizeof(__le32)) - 1));
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
+ __func__, __LINE__, type, *size);
+
+ return 0;
+}
+
+static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
+ u16 length, void *data)
+{
+ struct iwl_phy_db_cmd phy_db_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = PHY_DB_CMD,
+ .flags = CMD_SYNC,
+ };
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Sending PHY-DB hcmd of type %d, of length %d\n",
+ type, length);
+
+ /* Set phy db cmd variables */
+ phy_db_cmd.type = cpu_to_le16(type);
+ phy_db_cmd.length = cpu_to_le16(length);
+
+ /* Set hcmd variables */
+ cmd.data[0] = &phy_db_cmd;
+ cmd.len[0] = sizeof(struct iwl_phy_db_cmd);
+ cmd.data[1] = data;
+ cmd.len[1] = length;
+ cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
+
+ return iwl_trans_send_cmd(phy_db->trans, &cmd);
+}
+
+static int iwl_phy_db_send_all_channel_groups(
+ struct iwl_phy_db *phy_db,
+ enum iwl_phy_db_section_type type,
+ u8 max_ch_groups)
+{
+ u16 i;
+ int err;
+ struct iwl_phy_db_entry *entry;
+
+ /* Send all the channel specific groups to operational fw */
+ for (i = 0; i < max_ch_groups; i++) {
+ entry = iwl_phy_db_get_section(phy_db,
+ type,
+ i);
+ if (!entry)
+ return -EINVAL;
+
+ /* Send the requested PHY DB section */
+ err = iwl_send_phy_db_cmd(phy_db,
+ type,
+ entry->size,
+ entry->data);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Can't SEND phy_db section %d (%d), err %d",
+ type, i, err);
+ return err;
+ }
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Sent PHY_DB HCMD, type = %d num = %d",
+ type, i);
+ }
+
+ return 0;
+}
+
+int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
+{
+ u8 *data = NULL;
+ u16 size = 0;
+ int err;
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Sending phy db data and configuration to runtime image\n");
+
+ /* Send PHY DB CFG section */
+ err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG,
+ &data, &size, 0);
+ if (err) {
+ IWL_ERR(phy_db->trans, "Cannot get Phy DB cfg section\n");
+ return err;
+ }
+
+ err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CFG, size, data);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send HCMD of Phy DB cfg section\n");
+ return err;
+ }
+
+ err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CALIB_NCH,
+ &data, &size, 0);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot get Phy DB non specific channel section\n");
+ return err;
+ }
+
+ err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CALIB_NCH, size, data);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send HCMD of Phy DB non specific channel section\n");
+ return err;
+ }
+
+ /* Send all the PAPD channel specific data */
+ err = iwl_phy_db_send_all_channel_groups(phy_db,
+ IWL_PHY_DB_CALIB_CHG_PAPD,
+ IWL_NUM_PAPD_CH_GROUPS);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send channel specific PAPD groups");
+ return err;
+ }
+
+ /* Send all the TXP channel specific data */
+ err = iwl_phy_db_send_all_channel_groups(phy_db,
+ IWL_PHY_DB_CALIB_CHG_TXP,
+ IWL_NUM_TXP_CH_GROUPS);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send channel specific TX power groups");
+ return err;
+ }
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Finished sending phy db non channel data\n");
+ return 0;
+}
+EXPORT_SYMBOL(iwl_send_phy_db_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
new file mode 100644
index 00000000000..d0e43d96ab3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -0,0 +1,82 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_PHYDB_H__
+#define __IWL_PHYDB_H__
+
+#include <linux/types.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+
+struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans);
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
+ gfp_t alloc_ctx);
+
+
+int iwl_send_phy_db_data(struct iwl_phy_db *phy_db);
+
+#endif /* __IWL_PHYDB_H__ */
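
Pieced together from the functions above, a hedged sketch of the expected lifecycle: allocate the database against a transport, store each calibration result notification coming from the init firmware, replay everything to the runtime firmware, and free the database on teardown. The function name and the single-packet flow are illustrative; a real op_mode feeds many notifications before sending.

#include "iwl-phy-db.h"

static int example_phy_db_flow(struct iwl_trans *trans,
			       struct iwl_rx_packet *calib_pkt)
{
	struct iwl_phy_db *phy_db;
	int ret;

	phy_db = iwl_phy_db_init(trans);
	if (!phy_db)
		return -ENOMEM;

	/* store one calibration result notification (normally many) */
	ret = iwl_phy_db_set_section(phy_db, calib_pkt, GFP_KERNEL);
	if (ret)
		goto out;

	/* once the runtime image is alive, push cfg, non-channel calib
	 * and the per-channel-group sections to the firmware */
	ret = iwl_send_phy_db_data(phy_db);
out:
	iwl_phy_db_free(phy_db);
	return ret;
}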
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index c3a4bb41e53..f76e9cad775 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -97,6 +97,9 @@
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+/* Device system time */
+#define DEVICE_SYSTEM_TIME_REG 0xA0206C
+
/**
* Tx Scheduler
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
index 1a226114fe7..ce0c67b425e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.c
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -466,9 +466,7 @@ static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
/* Hard-coded periphery absolute address */
if (IWL_ABS_PRPH_START <= addr &&
addr < IWL_ABS_PRPH_START + PRPH_END) {
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (!iwl_trans_grab_nic_access(trans, false)) {
- spin_unlock_irqrestore(&trans->reg_lock, flags);
+ if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
return -EIO;
}
iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
@@ -476,8 +474,7 @@ static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
for (i = 0; i < size; i += 4)
*(u32 *)(tst->mem.addr + i) =
iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
- iwl_trans_release_nic_access(trans);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
+ iwl_trans_release_nic_access(trans, &flags);
} else { /* target memory (SRAM) */
iwl_trans_read_mem(trans, addr, tst->mem.addr,
tst->mem.size / 4);
@@ -506,19 +503,13 @@ static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
/* Periphery writes can be 1-3 bytes long, or DWORDs */
if (size < 4) {
memcpy(&val, buf, size);
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (!iwl_trans_grab_nic_access(trans, false)) {
- spin_unlock_irqrestore(&trans->reg_lock, flags);
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
return -EIO;
- }
iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
(addr & 0x0000FFFF) |
((size - 1) << 24));
iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
- iwl_trans_release_nic_access(trans);
- /* needed after consecutive writes w/o read */
- mmiowb();
- spin_unlock_irqrestore(&trans->reg_lock, flags);
+ iwl_trans_release_nic_access(trans, &flags);
} else {
if (size % 4)
return -EINVAL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
index e13ffa8acc0..7fbf4d717ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.h
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index 6ba211b0942..a963f45c684 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 0f85eb30587..8c7bec6b9a0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,6 +65,7 @@
#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
+#include <linux/lockdep.h>
#include "iwl-debug.h"
#include "iwl-config.h"
@@ -193,11 +194,11 @@ struct iwl_rx_packet {
* @CMD_ON_DEMAND: This command is sent by the test mode pipe.
*/
enum CMD_MODE {
- CMD_SYNC = 0,
- CMD_ASYNC = BIT(0),
- CMD_WANT_SKB = BIT(1),
- CMD_WANT_HCMD = BIT(2),
- CMD_ON_DEMAND = BIT(3),
+ CMD_SYNC = 0,
+ CMD_ASYNC = BIT(0),
+ CMD_WANT_SKB = BIT(1),
+ CMD_WANT_HCMD = BIT(2),
+ CMD_ON_DEMAND = BIT(3),
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -274,6 +275,7 @@ struct iwl_rx_cmd_buffer {
struct page *_page;
int _offset;
bool _page_stolen;
+ u32 _rx_page_order;
unsigned int truesize;
};
@@ -294,6 +296,11 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
return r->_page;
}
+static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
+{
+ __free_pages(r->_page, r->_rx_page_order);
+}
+
#define MAX_NO_RECLAIM_CMDS 6
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
@@ -410,8 +417,12 @@ struct iwl_trans;
* the op_mode. May be called several times before start_fw, can't be
* called after that.
* @set_pmi: set the power pmi state
- * @grab_nic_access: wake the NIC to be able to access non-HBUS regs
- * @release_nic_access: let the NIC go to sleep
+ * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
+ * Sleeping is not allowed between grab_nic_access and
+ * release_nic_access.
+ * @release_nic_access: let the NIC go to sleep. The "flags" parameter
+ * must be the same one that was passed to grab_nic_access.
+ * @set_bits_mask: set SRAM register according to value and mask.
*/
struct iwl_trans_ops {
@@ -454,8 +465,12 @@ struct iwl_trans_ops {
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
- bool (*grab_nic_access)(struct iwl_trans *trans, bool silent);
- void (*release_nic_access)(struct iwl_trans *trans);
+ bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
+ unsigned long *flags);
+ void (*release_nic_access)(struct iwl_trans *trans,
+ unsigned long *flags);
+ void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
+ u32 value);
};
/**
@@ -475,7 +490,6 @@ enum iwl_trans_state {
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @cfg - pointer to the configuration
- * @reg_lock - protect hw register access
* @dev - pointer to struct device * that represents the device
* @hw_id: a u32 with the ID of the device / subdevice.
* Set during transport allocation.
@@ -496,7 +510,6 @@ struct iwl_trans {
struct iwl_op_mode *op_mode;
const struct iwl_cfg *cfg;
enum iwl_trans_state state;
- spinlock_t reg_lock;
struct device *dev;
u32 hw_rev;
@@ -514,6 +527,10 @@ struct iwl_trans {
struct dentry *dbgfs_dir;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map sync_cmd_lockdep_map;
+#endif
+
/* pointer to trans specific struct */
/* Ensure that this pointer will always be aligned to the size of a pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -590,12 +607,22 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
+ struct iwl_host_cmd *cmd)
{
+ int ret;
+
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"%s bad state = %d", __func__, trans->state);
- return trans->ops->send_cmd(trans, cmd);
+ if (!(cmd->flags & CMD_ASYNC))
+ lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+
+ ret = trans->ops->send_cmd(trans, cmd);
+
+ if (!(cmd->flags & CMD_ASYNC))
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+
+ return ret;
}
static inline struct iwl_device_cmd *
@@ -756,14 +783,20 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
trans->ops->set_pmi(trans, state);
}
-#define iwl_trans_grab_nic_access(trans, silent) \
+static inline void
+iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
+{
+ trans->ops->set_bits_mask(trans, reg, mask, value);
+}
+
+#define iwl_trans_grab_nic_access(trans, silent, flags) \
__cond_lock(nic_access, \
- likely((trans)->ops->grab_nic_access(trans, silent)))
+ likely((trans)->ops->grab_nic_access(trans, silent, flags)))
static inline void __releases(nic_access)
-iwl_trans_release_nic_access(struct iwl_trans *trans)
+iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
- trans->ops->release_nic_access(trans);
+ trans->ops->release_nic_access(trans, flags);
__release(nic_access);
}
@@ -773,4 +806,14 @@ iwl_trans_release_nic_access(struct iwl_trans *trans)
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
+static inline void trans_lockdep_init(struct iwl_trans *trans)
+{
+#ifdef CONFIG_LOCKDEP
+ static struct lock_class_key __key;
+
+ lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+ &__key, 0);
+#endif
+}
+
#endif /* __iwl_trans_h__ */
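
The iwl-test.c hunks above show the point of the grab/release change: the reg_lock spinlock moved out of the callers and into the transport, so the pair now exchanges an IRQ-flags cookie instead. Below is a minimal sketch of the resulting call pattern, modelled on the periphery write in iwl_test_indirect_write(); the helper name is hypothetical, and no sleeping is allowed between grab and release.

static int example_write_prph_word(struct iwl_trans *trans, u32 addr, u32 val)
{
	unsigned long flags;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return -EIO;

	/* byte-count encoding mirrors the periphery write path above
	 * ((size - 1) << 24, i.e. 3 for a full 4-byte word) */
	iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
		    (addr & 0x0000FFFF) | (3 << 24));
	iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);

	/* hand back the very same flags cookie that grab filled in */
	iwl_trans_release_nic_access(trans, &flags);
	return 0;
}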
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
new file mode 100644
index 00000000000..807b250ec39
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_IWLMVM) += iwlmvm.o
+iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
+iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
+iwlmvm-y += scan.o time-event.o rs.o
+iwlmvm-y += power.o
+iwlmvm-y += led.o
+iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
+
+ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c
new file mode 100644
index 00000000000..73d24aacb90
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/binding.c
@@ -0,0 +1,197 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+struct iwl_mvm_iface_iterator_data {
+ struct ieee80211_vif *ignore_vif;
+ int idx;
+
+ struct iwl_mvm_phy_ctxt *phyctxt;
+
+ u16 ids[MAX_MACS_IN_BINDING];
+ u16 colors[MAX_MACS_IN_BINDING];
+};
+
+static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
+ struct iwl_mvm_iface_iterator_data *data)
+{
+ struct iwl_binding_cmd cmd;
+ struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
+ int i, ret;
+ u32 status;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+ phyctxt->color));
+ cmd.action = cpu_to_le32(action);
+ cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+ phyctxt->color));
+
+ for (i = 0; i < MAX_MACS_IN_BINDING; i++)
+ cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+ for (i = 0; i < data->idx; i++)
+ cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i],
+ data->colors[i]));
+
+ status = 0;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+ sizeof(cmd), &cmd, &status);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
+ action, ret);
+ return ret;
+ }
+
+ if (status) {
+ IWL_ERR(mvm, "Binding command failed: %u\n", status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif == data->ignore_vif)
+ return;
+
+ if (mvmvif->phy_ctxt != data->phyctxt)
+ return;
+
+ if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING))
+ return;
+
+ data->ids[data->idx] = mvmvif->id;
+ data->colors[data->idx] = mvmvif->color;
+ data->idx++;
+}
+
+static int iwl_mvm_binding_update(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_phy_ctxt *phyctxt,
+ bool add)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_iface_iterator_data data = {
+ .ignore_vif = vif,
+ .phyctxt = phyctxt,
+ };
+ u32 action = FW_CTXT_ACTION_MODIFY;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_iface_iterator,
+ &data);
+
+ /*
+ * If there are no other interfaces yet, we either need to
+ * create a new binding or remove the one being torn down.
+ */
+ if (data.idx == 0) {
+ if (add)
+ action = FW_CTXT_ACTION_ADD;
+ else
+ action = FW_CTXT_ACTION_REMOVE;
+ }
+
+ if (add) {
+ if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING))
+ return -EINVAL;
+
+ data.ids[data.idx] = mvmvif->id;
+ data.colors[data.idx] = mvmvif->color;
+ data.idx++;
+ }
+
+ return iwl_mvm_binding_cmd(mvm, action, &data);
+}
+
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
+}
+
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+}
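
For context, a short sketch of how callers are expected to drive these two entry points: both end up in iwl_mvm_binding_update(), which asserts mvm->mutex, and both require the vif to already carry a PHY context. The wrapper below is purely illustrative.

static int example_assign_binding(struct iwl_mvm *mvm,
				  struct ieee80211_vif *vif)
{
	int ret;

	mutex_lock(&mvm->mutex);	/* binding updates run under mvm->mutex */
	ret = iwl_mvm_binding_add_vif(mvm, vif);
	mutex_unlock(&mvm->mutex);

	return ret;
}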
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
new file mode 100644
index 00000000000..c64d864799c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -0,0 +1,955 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/cfg80211.h>
+#include <net/ipv6.h>
+#include "iwl-modparams.h"
+#include "fw-api.h"
+#include "mvm.h"
+
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (iwlwifi_mod_params.sw_crypto)
+ return;
+
+ mutex_lock(&mvm->mutex);
+
+ memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
+ memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
+ mvmvif->rekey_data.replay_ctr =
+ cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+ mvmvif->rekey_data.valid = true;
+
+ mutex_unlock(&mvm->mutex);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct inet6_ifaddr *ifa;
+ int idx = 0;
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ mvmvif->target_ipv6_addrs[idx] = ifa->addr;
+ idx++;
+ if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ mvmvif->num_target_ipv6_addrs = idx;
+}
+#endif
+
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->tx_key_idx = idx;
+}
+
+static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
+{
+ int i;
+
+ for (i = 0; i < IWL_P1K_SIZE; i++)
+ out[i] = cpu_to_le16(p1k[i]);
+}
+
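+/*
+ * Data passed to iwl_mvm_wowlan_program_keys() through
+ * ieee80211_iter_keys(): the RSC/TSC and TKIP command buffers to fill,
+ * flags recording which of those commands need to be sent, and the
+ * next hardware key offset to hand out for group keys.
+ */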
+struct wowlan_key_data {
+ struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+ struct iwl_wowlan_tkip_params_cmd *tkip;
+ bool error, use_rsc_tsc, use_tkip;
+ int gtk_key_idx;
+};
+
+static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct wowlan_key_data *data = _data;
+ struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+ struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+ struct iwl_p1k_cache *rx_p1ks;
+ u8 *rx_mic_key;
+ struct ieee80211_key_seq seq;
+ u32 cur_rx_iv32 = 0;
+ u16 p1k[IWL_P1K_SIZE];
+ int ret, i;
+
+ mutex_lock(&mvm->mutex);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
+ struct {
+ struct iwl_mvm_wep_key_cmd wep_key_cmd;
+ struct iwl_mvm_wep_key wep_key;
+ } __packed wkc = {
+ .wep_key_cmd.mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .wep_key_cmd.num_keys = 1,
+ /* firmware sets STA_KEY_FLG_WEP_13BYTES */
+ .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
+ .wep_key.key_index = key->keyidx,
+ .wep_key.key_size = key->keylen,
+ };
+
+ /*
+ * This will fail -- the key functions don't support setting
+ * pairwise WEP keys. However, that's better than silently
+ * failing WoWLAN. Or maybe not?
+ */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ break;
+
+ memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
+ if (key->keyidx == mvmvif->tx_key_idx) {
+ /* TX key must be at offset 0 */
+ wkc.wep_key.key_offset = 0;
+ } else {
+ /* others start at 1 */
+ data->gtk_key_idx++;
+ wkc.wep_key.key_offset = data->gtk_key_idx;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
+ sizeof(wkc), &wkc);
+ data->error = ret != 0;
+
+ /* don't upload key again */
+ goto out_unlock;
+ }
+ default:
+ data->error = true;
+ goto out_unlock;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /*
+ * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
+ * but we also shouldn't abort suspend due to that. It does have
+ * support for the IGTK key renewal, but doesn't really use the
+ * IGTK for anything. This means we could spuriously wake up or
+ * be deauthenticated, but that was considered acceptable.
+ */
+ goto out_unlock;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+ tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+
+ rx_p1ks = data->tkip->rx_uni;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+
+ ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+ iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
+
+ memcpy(data->tkip->mic_keys.tx,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ rx_mic_key = data->tkip->mic_keys.rx_unicast;
+ } else {
+ tkip_sc =
+ data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+ rx_p1ks = data->tkip->rx_multi;
+ rx_mic_key = data->tkip->mic_keys.rx_mcast;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 (as they must, to avoid replay attacks)
+ * for checking the IV in the frames.
+ */
+ for (i = 0; i < IWL_NUM_RSC; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+ /* wrapping isn't allowed, AP must rekey */
+ if (seq.tkip.iv32 > cur_rx_iv32)
+ cur_rx_iv32 = seq.tkip.iv32;
+ }
+
+ ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+ cur_rx_iv32, p1k);
+ iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
+ ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+ cur_rx_iv32 + 1, p1k);
+ iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+ memcpy(rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ data->use_tkip = true;
+ data->use_rsc_tsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (sta) {
+ u8 *pn = seq.ccmp.pn;
+
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+ aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
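+ /*
+ * Pack the 48-bit CCMP PN (pn[0] is the most significant
+ * byte) into the firmware's little-endian 64-bit counter.
+ */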
+ ieee80211_get_key_tx_seq(key, &seq);
+ aes_tx_sc->pn = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ } else {
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 for checking the IV in the frames.
+ */
+ for (i = 0; i < IWL_NUM_RSC; i++) {
+ u8 *pn = seq.ccmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ aes_sc->pn = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+ data->use_rsc_tsc = true;
+ break;
+ }
+
+ /*
+ * The D3 firmware hardcodes the key offset 0 as the key it uses
+ * to transmit packets to the AP, i.e. the PTK.
+ */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ key->hw_key_idx = 0;
+ } else {
+ data->gtk_key_idx++;
+ key->hw_key_idx = data->gtk_key_idx;
+ }
+
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
+ data->error = ret != 0;
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+}
+
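+/*
+ * Upload the user-configured wakeup packet patterns to the firmware
+ * (WOWLAN_PATTERNS). Nothing is sent if no patterns are configured.
+ */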
+static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .flags = CMD_SYNC,
+ };
+ int i, err;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = sizeof(*pattern_cmd) +
+ wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
+
+ pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ memcpy(&pattern_cmd->patterns[i].mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].mask_size = mask_len;
+ pattern_cmd->patterns[i].pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(pattern_cmd);
+ return err;
+}
+
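+/*
+ * Configure ARP and IPv6 neighbour solicitation offloads
+ * (PROT_OFFLOAD_CONFIG_CMD). The command is only sent if at least one
+ * offload ends up enabled.
+ */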
+static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_proto_offload_cmd cmd = {};
+#if IS_ENABLED(CONFIG_IPV6)
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int i;
+
+ if (mvmvif->num_target_ipv6_addrs) {
+ cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_NS);
+ memcpy(cmd.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ BUILD_BUG_ON(sizeof(cmd.target_ipv6_addr[i]) !=
+ sizeof(mvmvif->target_ipv6_addrs[i]));
+
+ for (i = 0; i < mvmvif->num_target_ipv6_addrs; i++)
+ memcpy(cmd.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.target_ipv6_addr[i]));
+#endif
+
+ if (vif->bss_conf.arp_addr_cnt) {
+ cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_ARP);
+ cmd.host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+ memcpy(cmd.arp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (!cmd.enabled)
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
+
+struct iwl_d3_iter_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+ bool error;
+};
+
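+/*
+ * Interface iterator: find the single associated station interface,
+ * flagging an error if more than one is active.
+ */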
+static void iwl_mvm_d3_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_d3_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return;
+
+ if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+ return;
+
+ if (data->vif) {
+ IWL_ERR(data->mvm, "More than one managed interface active!\n");
+ data->error = true;
+ return;
+ }
+
+ data->vif = vif;
+}
+
+static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *ap_sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *ctx;
+ u8 chains_static, chains_dynamic;
+ struct cfg80211_chan_def chandef;
+ int ret, i;
+ struct iwl_binding_cmd binding_cmd = {};
+ struct iwl_time_quota_cmd quota_cmd = {};
+ u32 status;
+
+ /* add back the PHY */
+ if (WARN_ON(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(vif->chanctx_conf);
+ if (WARN_ON(!ctx)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ chandef = ctx->def;
+ chains_static = ctx->rx_chains_static;
+ chains_dynamic = ctx->rx_chains_dynamic;
+ rcu_read_unlock();
+
+ ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
+ chains_static, chains_dynamic);
+ if (ret)
+ return ret;
+
+ /* add back the MAC */
+ mvmvif->uploaded = false;
+
+ if (WARN_ON(!vif->bss_conf.assoc))
+ return -EINVAL;
+ /* hack */
+ vif->bss_conf.assoc = false;
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ vif->bss_conf.assoc = true;
+ if (ret)
+ return ret;
+
+ /* add back binding - XXX refactor? */
+ binding_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+ mvmvif->phy_ctxt->color));
+ binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ binding_cmd.phy =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+ mvmvif->phy_ctxt->color));
+ binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ for (i = 1; i < MAX_MACS_IN_BINDING; i++)
+ binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+
+ status = 0;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+ sizeof(binding_cmd), &binding_cmd,
+ &status);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
+ return ret;
+ }
+
+ if (status) {
+ IWL_ERR(mvm, "Binding command failed: %u\n", status);
+ return -EIO;
+ }
+
+ ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
+ if (ret)
+ return ret;
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
+
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ if (ret)
+ return ret;
+
+ /* and some quota */
+ quota_cmd.quotas[0].id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+ mvmvif->phy_ctxt->color));
+ quota_cmd.quotas[0].quota = cpu_to_le32(100);
+ quota_cmd.quotas[0].max_duration = cpu_to_le32(1000);
+
+ for (i = 1; i < MAX_BINDINGS; i++)
+ quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+ sizeof(quota_cmd), &quota_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+
+ return 0;
+}
+
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_d3_iter_data suspend_iter_data = {
+ .mvm = mvm,
+ };
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvm_ap_sta;
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+ struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+ struct iwl_d3_manager_config d3_cfg_cmd = {};
+ struct wowlan_key_data key_data = {
+ .use_rsc_tsc = false,
+ .tkip = &tkip_cmd,
+ .use_tkip = false,
+ };
+ int ret, i;
+ u16 seq;
+ u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
+
+ if (WARN_ON(!wowlan))
+ return -EINVAL;
+
+ key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+ if (!key_data.rsc_tsc)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ old_aux_sta_id = mvm->aux_sta.sta_id;
+
+ /* see if there's only a single BSS vif and it's associated */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_iface_iterator, &suspend_iter_data);
+
+ if (suspend_iter_data.error || !suspend_iter_data.vif) {
+ ret = 1;
+ goto out_noreset;
+ }
+
+ vif = suspend_iter_data.vif;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ ap_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(ap_sta)) {
+ ret = -EINVAL;
+ goto out_noreset;
+ }
+
+ mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+
+ /*
+ * The D3 firmware still hardcodes the AP station ID for the
+ * BSS we're associated with as 0. Store the real STA ID here
+ * and assign 0. When we leave this function, we'll restore
+ * the original value for the resume code.
+ */
+ old_ap_sta_id = mvm_ap_sta->sta_id;
+ mvm_ap_sta->sta_id = 0;
+ mvmvif->ap_sta_id = 0;
+
+ /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
+
+ wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
+
+ /*
+ * We know the last used seqno, and the uCode expects to know that
+ * one; it will be incremented before TX.
+ */
+ seq = mvm_ap_sta->last_seq_ctl & IEEE80211_SCTL_SEQ;
+ wowlan_config_cmd.non_qos_seq = cpu_to_le16(seq);
+
+ /*
+ * For QoS counters, we store the value to use next, so subtract 0x10:
+ * the uCode adds 0x10 *before* using the value, while we increment
+ * *after* using it (i.e. we store the next value to use).
+ */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ seq = mvm_ap_sta->tid_data[i].seq_number;
+ seq -= 0x10;
+ wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
+ }
+
+ if (wowlan->disconnect)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+ if (wowlan->magic_pkt)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+ if (wowlan->gtk_rekey_failure)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+ if (wowlan->eap_identity_req)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+ if (wowlan->four_way_handshake)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+ if (wowlan->n_patterns)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+ if (wowlan->rfkill_release)
+ d3_cfg_cmd.wakeup_flags |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+ iwl_mvm_cancel_scan(mvm);
+
+ iwl_trans_stop_device(mvm->trans);
+
+ /*
+ * Set the HW restart bit -- this is mostly true as we're
+ * going to load new firmware and reprogram that, though
+ * the reprogramming is going to be manual to avoid adding
+ * all the MACs that aren't supported.
+ * We don't have to clear up everything though because the
+ * reprogramming is manual. When we resume, we'll actually
+ * go through a proper restart sequence again to switch
+ * back to the runtime firmware image.
+ */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+ /* We reprogram keys and shouldn't allocate new key indices */
+ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+
+ /*
+ * The D3 firmware still hardcodes the AP station ID for the
+ * BSS we're associated with as 0. As a result, we have to move
+ * the auxiliary station to ID 1 so that ID 0 remains free for
+ * the AP station later.
+ * We set the sta_id to 1 here, and reset it to its previous
+ * value (that we stored above) later.
+ */
+ mvm->aux_sta.sta_id = 1;
+
+ ret = iwl_mvm_load_d3_fw(mvm);
+ if (ret)
+ goto out;
+
+ ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+ if (ret)
+ goto out;
+
+ if (!iwlwifi_mod_params.sw_crypto) {
+ /*
+ * This needs to be unlocked due to lock ordering
+ * constraints. Since we're in the suspend path
+ * that isn't really a problem though.
+ */
+ mutex_unlock(&mvm->mutex);
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_wowlan_program_keys,
+ &key_data);
+ mutex_lock(&mvm->mutex);
+ if (key_data.error) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (key_data.use_rsc_tsc) {
+ struct iwl_host_cmd rsc_tsc_cmd = {
+ .id = WOWLAN_TSC_RSC_PARAM,
+ .flags = CMD_SYNC,
+ .data[0] = key_data.rsc_tsc,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .len[0] = sizeof(*key_data.rsc_tsc),
+ };
+
+ ret = iwl_mvm_send_cmd(mvm, &rsc_tsc_cmd);
+ if (ret)
+ goto out;
+ }
+
+ if (key_data.use_tkip) {
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WOWLAN_TKIP_PARAM,
+ CMD_SYNC, sizeof(tkip_cmd),
+ &tkip_cmd);
+ if (ret)
+ goto out;
+ }
+
+ if (mvmvif->rekey_data.valid) {
+ memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+ memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
+ NL80211_KCK_LEN);
+ kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+ memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
+ NL80211_KEK_LEN);
+ kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+ kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WOWLAN_KEK_KCK_MATERIAL,
+ CMD_SYNC,
+ sizeof(kek_kck_cmd),
+ &kek_kck_cmd);
+ if (ret)
+ goto out;
+ }
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION,
+ CMD_SYNC, sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ if (ret)
+ goto out;
+
+ ret = iwl_mvm_send_patterns(mvm, wowlan);
+ if (ret)
+ goto out;
+
+ ret = iwl_mvm_send_proto_offload(mvm, vif);
+ if (ret)
+ goto out;
+
+ /* must be last -- this switches firmware state */
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC,
+ sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+ if (ret)
+ goto out;
+
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+ iwl_trans_d3_suspend(mvm->trans);
+ out:
+ mvm->aux_sta.sta_id = old_aux_sta_id;
+ mvm_ap_sta->sta_id = old_ap_sta_id;
+ mvmvif->ap_sta_id = old_ap_sta_id;
+ out_noreset:
+ kfree(key_data.rsc_tsc);
+ if (ret < 0)
+ ieee80211_restart_hw(mvm->hw);
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
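+/*
+ * Read the wakeup status from the D3 firmware (or from the error table
+ * if the firmware asserted) and report it to mac80211.
+ */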
+static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 base = mvm->error_event_table;
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
+ };
+ struct iwl_wowlan_status *status;
+ u32 reasons;
+ int ret, len;
+ bool pkt8023 = false;
+ struct sk_buff *pkt = NULL;
+
+ iwl_trans_read_mem_bytes(mvm->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid) {
+ IWL_INFO(mvm, "error table is valid (%d)\n",
+ err_info.valid);
+ if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ wakeup.rfkill_release = true;
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+ return;
+ }
+
+ /* only for tracing for now */
+ ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
+ if (ret)
+ IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "failed to query status (%d)\n", ret);
+ return;
+ }
+
+ /* RF-kill already asserted again... */
+ if (!cmd.resp_pkt)
+ return;
+
+ len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ goto out;
+ }
+
+ status = (void *)cmd.resp_pkt->data;
+
+ if (len - sizeof(struct iwl_cmd_header) !=
+ sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ goto out;
+ }
+
+ reasons = le32_to_cpu(status->wakeup_reasons);
+
+ if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+ wakeup_report = NULL;
+ goto report;
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) {
+ wakeup.magic_pkt = true;
+ pkt8023 = true;
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) {
+ wakeup.pattern_idx =
+ le16_to_cpu(status->pattern_number);
+ pkt8023 = true;
+ }
+
+ if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
+ wakeup.disconnect = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) {
+ wakeup.gtk_rekey_failure = true;
+ pkt8023 = true;
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) {
+ wakeup.rfkill_release = true;
+ pkt8023 = true;
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) {
+ wakeup.eap_identity_req = true;
+ pkt8023 = true;
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) {
+ wakeup.four_way_handshake = true;
+ pkt8023 = true;
+ }
+
+ if (status->wake_packet_bufsize) {
+ u32 pktsize = le32_to_cpu(status->wake_packet_bufsize);
+ u32 pktlen = le32_to_cpu(status->wake_packet_length);
+
+ if (pkt8023) {
+ pkt = alloc_skb(pktsize, GFP_KERNEL);
+ if (!pkt)
+ goto report;
+ memcpy(skb_put(pkt, pktsize), status->wake_packet,
+ pktsize);
+ if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
+ goto report;
+ wakeup.packet = pkt->data;
+ wakeup.packet_present_len = pkt->len;
+ wakeup.packet_len = pkt->len - (pktlen - pktsize);
+ wakeup.packet_80211 = false;
+ } else {
+ wakeup.packet = status->wake_packet;
+ wakeup.packet_present_len = pktsize;
+ wakeup.packet_len = pktlen;
+ wakeup.packet_80211 = true;
+ }
+ }
+
+ report:
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+ kfree_skb(pkt);
+
+ out:
+ iwl_free_resp(&cmd);
+}
+
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_d3_iter_data resume_iter_data = {
+ .mvm = mvm,
+ };
+ struct ieee80211_vif *vif = NULL;
+ int ret;
+ enum iwl_d3_status d3_status;
+
+ mutex_lock(&mvm->mutex);
+
+ /* get the BSS vif pointer again */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_iface_iterator, &resume_iter_data);
+
+ if (WARN_ON(resume_iter_data.error || !resume_iter_data.vif))
+ goto out_unlock;
+
+ vif = resume_iter_data.vif;
+
+ ret = iwl_trans_d3_resume(mvm->trans, &d3_status);
+ if (ret)
+ goto out_unlock;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(mvm, "Device was reset during suspend\n");
+ goto out_unlock;
+ }
+
+ iwl_mvm_query_wakeup_reasons(mvm, vif);
+
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ if (vif)
+ ieee80211_resume_disconnect(vif);
+
+ /* return 1 to reconfigure the device */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ return 1;
+}
+
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ device_set_wakeup_enable(mvm->trans->dev, enabled);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
new file mode 100644
index 00000000000..c1bdb558212
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -0,0 +1,378 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "sta.h"
+#include "iwl-io.h"
+
+struct iwl_dbgfs_mvm_ctx {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+};
+
+static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+
+ char buf[16];
+ int buf_size, ret;
+ u32 scd_q_msk;
+
+ if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x", &scd_q_msk) != 1)
+ return -EINVAL;
+
+ IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, true) ? : count;
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct ieee80211_sta *sta;
+
+ char buf[8];
+ int buf_size, sta_id, drain, ret;
+
+ if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ ret = -ENOENT;
+ else
+ ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? :
+ count;
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ const struct fw_img *img;
+ int ofs, len, pos = 0;
+ size_t bufsz;
+ ssize_t ret;
+ char *buf;
+ u8 *ptr;
+
+ /* default is to dump the entire data segment */
+ if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) {
+ mvm->dbgfs_sram_offset = 0x800000;
+ if (!mvm->ucode_loaded)
+ return -EINVAL;
+ img = &mvm->fw->img[mvm->cur_ucode];
+ mvm->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ }
+ len = mvm->dbgfs_sram_len;
+
+ bufsz = len * 4 + 256;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ptr = kzalloc(len, GFP_KERNEL);
+ if (!ptr) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len);
+ pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+ mvm->dbgfs_sram_offset);
+
+ iwl_trans_read_mem_bytes(mvm->trans,
+ mvm->dbgfs_sram_offset,
+ ptr, len);
+ for (ofs = 0; ofs < len; ofs += 16) {
+ pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
+ bufsz - pos, false);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+ kfree(buf);
+ kfree(ptr);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_sram_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[64];
+ int buf_size;
+ u32 offset, len;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+ if ((offset & 0x3) || (len & 0x3))
+ return -EINVAL;
+ mvm->dbgfs_sram_offset = offset;
+ mvm->dbgfs_sram_len = len;
+ } else {
+ mvm->dbgfs_sram_offset = 0;
+ mvm->dbgfs_sram_len = 0;
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct ieee80211_sta *sta;
+ char buf[400];
+ int i, pos = 0, bufsz = sizeof(buf);
+
+ mutex_lock(&mvm->mutex);
+
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (!sta)
+ pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
+ else if (IS_ERR(sta))
+ pos += scnprintf(buf + pos, bufsz - pos, "%ld\n",
+ PTR_ERR(sta));
+ else
+ pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_power_down_allow_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[8] = {};
+ int buf_size, allow;
+
+ if (!mvm->ucode_loaded)
+ return -EIO;
+
+ /* bound the copy and keep a terminating NUL for sscanf() */
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%d", &allow) != 1)
+ return -EINVAL;
+
+ IWL_DEBUG_POWER(mvm, "%s device power down\n",
+ allow ? "allow" : "prevent");
+
+ /*
+ * TODO: Send REPLY_DEBUG_CMD (0xf0) when the FW supports it
+ */
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[8] = {};
+ int buf_size, allow;
+
+ /* bound the copy and keep a terminating NUL for sscanf() */
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%d", &allow) != 1)
+ return -EINVAL;
+
+ IWL_DEBUG_POWER(mvm, "%s device power down in d3\n",
+ allow ? "allow" : "prevent");
+
+ /*
+ * TODO: When the WoWLAN FW alive notification happens, the driver
+ * will send REPLY_DEBUG_CMD, setting the power_down_allow flag
+ * according to mvm->prevent_power_down_d3.
+ */
+ mvm->prevent_power_down_d3 = !allow;
+
+ return count;
+}
+
+#define MVM_DEBUGFS_READ_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, mvm, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, vif, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+
+/* Device wide debugfs entries */
+MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush);
+MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
+MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
+MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+
+int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
+{
+ char buf[100];
+
+ mvm->debugfs_dir = dbgfs_dir;
+
+ MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+
+ /*
+ * Create a symlink with mac80211. It will be removed when mac80211
+ * exits (before the opmode exits, which removes the target).
+ */
+ snprintf(buf, 100, "../../%s/%s",
+ dbgfs_dir->d_parent->d_parent->d_name.name,
+ dbgfs_dir->d_parent->d_name.name);
+ if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
+ goto err;
+
+ return 0;
+err:
+ IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
+ return -ENOMEM;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
new file mode 100644
index 00000000000..cf6f9a02fb7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -0,0 +1,282 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_d3_h__
+#define __fw_api_d3_h__
+
+/**
+ * enum iwl_d3_wakeup_flags - D3 manager wakeup flags
+ * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
+ */
+enum iwl_d3_wakeup_flags {
+ IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
+}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */
+
+/**
+ * struct iwl_d3_manager_config - D3 manager configuration command
+ * @min_sleep_time: minimum sleep time (in usec)
+ * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
+ *
+ * The structure is used for the D3_CONFIG_CMD command.
+ */
+struct iwl_d3_manager_config {
+ __le32 min_sleep_time;
+ __le32 wakeup_flags;
+} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */
+
+
+/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
+
+/**
+ * enum iwl_proto_offloads - enabled protocol offloads
+ * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
+ * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
+ */
+enum iwl_proto_offloads {
+ IWL_D3_PROTO_OFFLOAD_ARP = BIT(0),
+ IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
+};
+
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS 2
+
+/**
+ * struct iwl_proto_offload_cmd - ARP/NS offload configuration
+ * @enabled: enable flags
+ * @remote_ipv4_addr: remote address to answer to (or zero if all)
+ * @host_ipv4_addr: our IPv4 address to respond to queries for
+ * @arp_mac_addr: our MAC address for ARP responses
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ * for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ */
+struct iwl_proto_offload_cmd {
+ __le32 enabled;
+ __be32 remote_ipv4_addr;
+ __be32 host_ipv4_addr;
+ u8 arp_mac_addr[ETH_ALEN];
+ __le16 reserved1;
+
+ u8 remote_ipv6_addr[16];
+ u8 solicited_node_ipv6_addr[16];
+ u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS][16];
+ u8 ndp_mac_addr[ETH_ALEN];
+ __le16 reserved2;
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
+
+
+/*
+ * WOWLAN_PATTERNS
+ */
+#define IWL_WOWLAN_MIN_PATTERN_LEN 16
+#define IWL_WOWLAN_MAX_PATTERN_LEN 128
+
+struct iwl_wowlan_pattern {
+ u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+ u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
+ u8 mask_size;
+ u8 pattern_size;
+ __le16 reserved;
+} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */
+
+#define IWL_WOWLAN_MAX_PATTERNS 20
+
+struct iwl_wowlan_patterns_cmd {
+ __le32 n_patterns;
+ struct iwl_wowlan_pattern patterns[];
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
+
+enum iwl_wowlan_wakeup_filters {
+ IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
+ IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
+ IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
+ IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
+ IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
+ IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
+ IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7),
+ IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
+ IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
+ IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
+ /* BIT(11) reserved */
+ IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
+}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
+
+struct iwl_wowlan_config_cmd {
+ __le32 wakeup_filter;
+ __le16 non_qos_seq;
+ __le16 qos_seq[8];
+ u8 wowlan_ba_teardown_tids;
+ u8 is_11n_connection;
+} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
+
+/*
+ * WOWLAN_TSC_RSC_PARAMS
+ */
+#define IWL_NUM_RSC 16
+
+struct tkip_sc {
+ __le16 iv16;
+ __le16 pad;
+ __le32 iv32;
+} __packed; /* TKIP_SC_API_U_VER_1 */
+
+struct iwl_tkip_rsc_tsc {
+ struct tkip_sc unicast_rsc[IWL_NUM_RSC];
+ struct tkip_sc multicast_rsc[IWL_NUM_RSC];
+ struct tkip_sc tsc;
+} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */
+
+struct aes_sc {
+ __le64 pn;
+} __packed; /* TKIP_AES_SC_API_U_VER_1 */
+
+struct iwl_aes_rsc_tsc {
+ struct aes_sc unicast_rsc[IWL_NUM_RSC];
+ struct aes_sc multicast_rsc[IWL_NUM_RSC];
+ struct aes_sc tsc;
+} __packed; /* AES_TSC_RSC_API_S_VER_1 */
+
+union iwl_all_tsc_rsc {
+ struct iwl_tkip_rsc_tsc tkip;
+ struct iwl_aes_rsc_tsc aes;
+}; /* ALL_TSC_RSC_API_S_VER_2 */
+
+struct iwl_wowlan_rsc_tsc_params_cmd {
+ union iwl_all_tsc_rsc all_tsc_rsc;
+} __packed; /* ALL_TSC_RSC_API_S_VER_2 */
+
+#define IWL_MIC_KEY_SIZE 8
+struct iwl_mic_keys {
+ u8 tx[IWL_MIC_KEY_SIZE];
+ u8 rx_unicast[IWL_MIC_KEY_SIZE];
+ u8 rx_mcast[IWL_MIC_KEY_SIZE];
+} __packed; /* MIC_KEYS_API_S_VER_1 */
+
+#define IWL_P1K_SIZE 5
+struct iwl_p1k_cache {
+ __le16 p1k[IWL_P1K_SIZE];
+} __packed;
+
+#define IWL_NUM_RX_P1K_CACHE 2
+
+struct iwl_wowlan_tkip_params_cmd {
+ struct iwl_mic_keys mic_keys;
+ struct iwl_p1k_cache tx;
+ struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
+ struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
+} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */
+
+#define IWL_KCK_MAX_SIZE 32
+#define IWL_KEK_MAX_SIZE 32
+
+struct iwl_wowlan_kek_kck_material_cmd {
+ u8 kck[IWL_KCK_MAX_SIZE];
+ u8 kek[IWL_KEK_MAX_SIZE];
+ __le16 kck_len;
+ __le16 kek_len;
+ __le64 replay_ctr;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */
+
+#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
+
+enum iwl_wowlan_rekey_status {
+ IWL_WOWLAN_REKEY_POST_REKEY = 0,
+ IWL_WOWLAN_REKEY_WHILE_REKEY = 1,
+}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */
+
+enum iwl_wowlan_wakeup_reason {
+ IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0,
+ IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0),
+ IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1),
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2),
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3),
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4),
+ IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5),
+ IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6),
+ IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7),
+ IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
+}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
+
+struct iwl_wowlan_status {
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 rekey_status;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
+
+/* TODO: NetDetect API */
+
+#endif /* __fw_api_d3_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
new file mode 100644
index 00000000000..ae39b7dfda7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -0,0 +1,369 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_mac_h__
+#define __fw_api_mac_h__
+
+/*
+ * The first MAC indices (starting from 0)
+ * are available to the driver, AUX follows
+ */
+#define MAC_INDEX_AUX 4
+#define MAC_INDEX_MIN_DRIVER 0
+#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
+
+#define AC_NUM 4 /* Number of access categories */
+
+/**
+ * enum iwl_mac_protection_flags - MAC context flags
+ * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames,
+ * this will require CCK RTS/CTS2self.
+ * RTS/CTS will protect full burst time.
+ * @MAC_PROT_FLG_HT_PROT: enable HT protection
+ * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
+ * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
+ */
+enum iwl_mac_protection_flags {
+ MAC_PROT_FLG_TGG_PROTECT = BIT(3),
+ MAC_PROT_FLG_HT_PROT = BIT(23),
+ MAC_PROT_FLG_FAT_PROT = BIT(24),
+ MAC_PROT_FLG_SELF_CTS_EN = BIT(30),
+};
+
+#define MAC_FLG_SHORT_SLOT BIT(4)
+#define MAC_FLG_SHORT_PREAMBLE BIT(5)
+
+/**
+ * enum iwl_mac_types - Supported MAC types
+ * @FW_MAC_TYPE_FIRST: lowest supported MAC type
+ * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
+ * @FW_MAC_TYPE_LISTENER: monitor MAC type (?)
+ * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS
+ * @FW_MAC_TYPE_IBSS: IBSS
+ * @FW_MAC_TYPE_BSS_STA: BSS (managed) station
+ * @FW_MAC_TYPE_P2P_DEVICE: P2P Device
+ * @FW_MAC_TYPE_P2P_STA: P2P client
+ * @FW_MAC_TYPE_GO: P2P GO
+ * @FW_MAC_TYPE_TEST: ?
+ * @FW_MAC_TYPE_MAX: highest supported MAC type
+ */
+enum iwl_mac_types {
+ FW_MAC_TYPE_FIRST = 1,
+ FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST,
+ FW_MAC_TYPE_LISTENER,
+ FW_MAC_TYPE_PIBSS,
+ FW_MAC_TYPE_IBSS,
+ FW_MAC_TYPE_BSS_STA,
+ FW_MAC_TYPE_P2P_DEVICE,
+ FW_MAC_TYPE_P2P_STA,
+ FW_MAC_TYPE_GO,
+ FW_MAC_TYPE_TEST,
+ FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST
+}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */
+
+/**
+ * enum iwl_tsf_id - TSF hw timer ID
+ * @TSF_ID_A: use TSF A
+ * @TSF_ID_B: use TSF B
+ * @TSF_ID_C: use TSF C
+ * @TSF_ID_D: use TSF D
+ * @NUM_TSF_IDS: number of TSF timers available
+ */
+enum iwl_tsf_id {
+ TSF_ID_A = 0,
+ TSF_ID_B = 1,
+ TSF_ID_C = 2,
+ TSF_ID_D = 3,
+ NUM_TSF_IDS = 4,
+}; /* TSF_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_mac_data_ap - configuration data for AP MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @dtim_interval: dtim transmit time in TU
+ * @dtim_reciprocal: 2^32 / dtim_interval
+ * @mcast_qid: queue ID for multicast traffic
+ * @beacon_template: beacon template ID
+ */
+struct iwl_mac_data_ap {
+ __le32 beacon_time;
+ __le64 beacon_tsf;
+ __le32 bi;
+ __le32 bi_reciprocal;
+ __le32 dtim_interval;
+ __le32 dtim_reciprocal;
+ __le32 mcast_qid;
+ __le32 beacon_template;
+} __packed; /* AP_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_ibss - configuration data for IBSS MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ */
+struct iwl_mac_data_ibss {
+ __le32 beacon_time;
+ __le64 beacon_tsf;
+ __le32 bi;
+ __le32 bi_reciprocal;
+} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_sta - configuration data for station MAC context
+ * @is_assoc: 1 for associated state, 0 otherwise
+ * @dtim_time: DTIM arrival time in system time
+ * @dtim_tsf: DTIM arrival time in TSF
+ * @bi: beacon interval in TU, applicable only when associated
+ * @bi_reciprocal: 2^32 / bi , applicable only when associated
+ * @dtim_interval: DTIM interval in TU, applicable only when associated
+ * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
+ * @listen_interval: in beacon intervals, applicable only when associated
+ * @assoc_id: unique ID assigned by the AP during association
+ */
+struct iwl_mac_data_sta {
+ __le32 is_assoc;
+ __le32 dtim_time;
+ __le64 dtim_tsf;
+ __le32 bi;
+ __le32 bi_reciprocal;
+ __le32 dtim_interval;
+ __le32 dtim_reciprocal;
+ __le32 listen_interval;
+ __le32 assoc_id;
+ __le32 assoc_beacon_arrive_time;
+} __packed; /* STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_go - configuration data for P2P GO MAC context
+ * @ap: iwl_mac_data_ap struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ * @opp_ps_enabled: indicates that opportunistic PS is allowed
+ */
+struct iwl_mac_data_go {
+ struct iwl_mac_data_ap ap;
+ __le32 ctwin;
+ __le32 opp_ps_enabled;
+} __packed; /* GO_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context
+ * @sta: iwl_mac_data_sta struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ */
+struct iwl_mac_data_p2p_sta {
+ struct iwl_mac_data_sta sta;
+ __le32 ctwin;
+} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_pibss - Pseudo IBSS config data
+ * @stats_interval: interval in TU between statistics notifications to host.
+ */
+struct iwl_mac_data_pibss {
+ __le32 stats_interval;
+} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */
+
+/*
+ * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC
+ * context.
+ * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
+ * other channels as well. This should be set to true only when the
+ * device is discoverable and there is an active GO. Note that setting
+ * this field when it is not needed will increase the number of
+ * interrupts and affect platform power, as it opens the Rx filters on
+ * all MACs.
+ */
+struct iwl_mac_data_p2p_dev {
+ __le32 is_disc_extended;
+} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
+
+/**
+ * enum iwl_mac_filter_flags - MAC context filter flags
+ * @MAC_FILTER_IN_PROMISC: accept all data frames
+ * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
+ * control frames to the host
+ * @MAC_FILTER_ACCEPT_GRP: accept multicast frames
+ * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
+ * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
+ * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
+ * (in station mode when associated)
+ * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames
+ * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames
+ * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
+ */
+enum iwl_mac_filter_flags {
+ MAC_FILTER_IN_PROMISC = BIT(0),
+ MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1),
+ MAC_FILTER_ACCEPT_GRP = BIT(2),
+ MAC_FILTER_DIS_DECRYPT = BIT(3),
+ MAC_FILTER_DIS_GRP_DECRYPT = BIT(4),
+ MAC_FILTER_IN_BEACON = BIT(6),
+ MAC_FILTER_OUT_BCAST = BIT(8),
+ MAC_FILTER_IN_CRC32 = BIT(11),
+ MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
+};
+
+/**
+ * enum iwl_mac_qos_flags - QoS flags
+ * @MAC_QOS_FLG_UPDATE_EDCA: ?
+ * @MAC_QOS_FLG_TGN: HT is enabled
+ * @MAC_QOS_FLG_TXOP_TYPE: ?
+ *
+ */
+enum iwl_mac_qos_flags {
+ MAC_QOS_FLG_UPDATE_EDCA = BIT(0),
+ MAC_QOS_FLG_TGN = BIT(1),
+ MAC_QOS_FLG_TXOP_TYPE = BIT(4),
+};
+
+/**
+ * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD
+ * @cw_min: Contention window, start value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x3f.
+ * @aifsn: Number of slots in Arbitration Interframe Space (before
+ * performing random backoff timing prior to Tx). Device default 1.
+ * @fifos_mask: FIFOs used by this MAC for this AC
+ * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
+ *
+ * One instance of this config struct for each of 4 EDCA access categories
+ * in struct iwl_qosparam_cmd.
+ *
+ * Device will automatically increase contention window by (2*CW) + 1 for each
+ * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwl_ac_qos {
+ __le16 cw_min;
+ __le16 cw_max;
+ u8 aifsn;
+ u8 fifos_mask;
+ __le16 edca_txop;
+} __packed; /* AC_QOS_API_S_VER_2 */
+
+/**
+ * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts
+ * ( MAC_CONTEXT_CMD = 0x28 )
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @mac_type: one of FW_MAC_TYPE_*
+ * @tsf_id: TSF HW timer, one of TSF_ID_*
+ * @node_addr: MAC address
+ * @bssid_addr: BSSID
+ * @cck_rates: basic rates available for CCK
+ * @ofdm_rates: basic rates available for OFDM
+ * @protection_flags: combination of MAC_PROT_FLG_FLAG_*
+ * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
+ * @short_slot: 0x10 for enabling short slots, 0 otherwise
+ * @filter_flags: combination of MAC_FILTER_*
+ * @qos_flags: from MAC_QOS_FLG_*
+ * @ac: one iwl_ac_qos configuration for each AC
+ * @mac_specific: one of struct iwl_mac_data_*, according to mac_type
+ */
+struct iwl_mac_ctx_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
+ __le32 mac_type;
+ __le32 tsf_id;
+ u8 node_addr[6];
+ __le16 reserved_for_node_addr;
+ u8 bssid_addr[6];
+ __le16 reserved_for_bssid_addr;
+ __le32 cck_rates;
+ __le32 ofdm_rates;
+ __le32 protection_flags;
+ __le32 cck_short_preamble;
+ __le32 short_slot;
+ __le32 filter_flags;
+ /* MAC_QOS_PARAM_API_S_VER_1 */
+ __le32 qos_flags;
+ struct iwl_ac_qos ac[AC_NUM+1];
+ /* MAC_CONTEXT_COMMON_DATA_API_S */
+ union {
+ struct iwl_mac_data_ap ap;
+ struct iwl_mac_data_go go;
+ struct iwl_mac_data_sta sta;
+ struct iwl_mac_data_p2p_sta p2p_sta;
+ struct iwl_mac_data_p2p_dev p2p_dev;
+ struct iwl_mac_data_pibss pibss;
+ struct iwl_mac_data_ibss ibss;
+ };
+} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */
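To show how the common part of this command fits together, the following editor's sketch (not part of the patch) fills a station-mode MAC context. The action, mac_type and tsf_id values are taken as parameters because the FW_CTXT_ACTION_*, FW_MAC_TYPE_* and TSF_ID_* constants are defined outside this header; the AC entries reuse the illustrative helper from the sketch above:

static void example_fill_sta_mac_ctx(struct iwl_mac_ctx_cmd *cmd,
                                     u32 id_and_color, u32 action,
                                     u32 mac_type, u32 tsf_id,
                                     const u8 *own_addr, const u8 *bssid)
{
        int i;

        memset(cmd, 0, sizeof(*cmd));
        cmd->id_and_color = cpu_to_le32(id_and_color);
        cmd->action = cpu_to_le32(action);
        cmd->mac_type = cpu_to_le32(mac_type);
        cmd->tsf_id = cpu_to_le32(tsf_id);
        memcpy(cmd->node_addr, own_addr, 6);
        memcpy(cmd->bssid_addr, bssid, 6);
        /* pass beacons and probe requests to the host, accept multicast */
        cmd->filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
                                        MAC_FILTER_ACCEPT_GRP |
                                        MAC_FILTER_IN_PROBE_REQUEST);
        cmd->qos_flags = cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA |
                                     MAC_QOS_FLG_TGN);
        for (i = 0; i < ARRAY_SIZE(cmd->ac); i++)
                example_fill_be_ac(&cmd->ac[i]);
}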
+
+static inline u32 iwl_mvm_reciprocal(u32 v)
+{
+ if (!v)
+ return 0;
+ return 0xFFFFFFFF / v;
+}
+
+#endif /* __fw_api_mac_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
new file mode 100644
index 00000000000..be36b7604b7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_power_h__
+#define __fw_api_power_h__
+
+/* Power Management Commands, Responses, Notifications */
+
+/**
+ * enum iwl_power_flags - masks for power table command flags
+ * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+ * '1' Driver enables PM (use rest of parameters)
+ * @POWER_FLAGS_SLEEP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
+ * '1' PM may sleep over DTIM till the listen interval.
+ * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ * access categories are both delivery and trigger enabled.
+ * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ * PBW Snoozing enabled
+ * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
+*/
+enum iwl_power_flags {
+ POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(0),
+ POWER_FLAGS_SLEEP_OVER_DTIM_MSK = BIT(1),
+ POWER_FLAGS_LPRX_ENA_MSK = BIT(2),
+ POWER_FLAGS_SNOOZE_ENA_MSK = BIT(3),
+ POWER_FLAGS_BT_SCO_ENA = BIT(4),
+ POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(5)
+};
+
+/**
+ * struct iwl_powertable_cmd - Power Table Command
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * @id_and_color: MAC context identifier
+ * @action: Action on context - no action, add new,
+ * modify existent, remove
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ * Minimum allowed: 3 * DTIM
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80 dBm
+ * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval: TBD
+ * @snooze_window: TBD
+ * @snooze_step: TBD
+ * @qndp_tid: TBD
+ * @uapsd_ac_flags: TBD
+ * @uapsd_max_sp: TBD
+ */
+struct iwl_powertable_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ __le16 flags;
+ u8 reserved;
+ __le16 keep_alive_seconds;
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 rx_data_timeout_uapsd;
+ __le32 tx_data_timeout_uapsd;
+ u8 lprx_rssi_threshold;
+ u8 num_skip_dtim;
+ __le16 snooze_interval;
+ __le16 snooze_window;
+ u8 snooze_step;
+ u8 qndp_tid;
+ u8 uapsd_ac_flags;
+ u8 uapsd_max_sp;
+} __packed;
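As an editor's sketch (not part of the patch) of how the flags and timeouts documented above combine, the helper below fills a basic legacy-PM power table; the action value is passed in because FW_CTXT_ACTION_* lives outside this header, and the timeout and DTIM-skip numbers are example values only:

static void example_fill_powertable(struct iwl_powertable_cmd *cmd,
                                    u32 id_and_color, u32 action)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->id_and_color = cpu_to_le32(id_and_color);
        cmd->action = cpu_to_le32(action);
        /* enable legacy power management and allow sleeping over DTIM */
        cmd->flags = cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK |
                                 POWER_FLAGS_SLEEP_OVER_DTIM_MSK);
        cmd->keep_alive_seconds = cpu_to_le16(25);      /* documented default */
        cmd->rx_data_timeout = cpu_to_le32(100 * 1000); /* 100 ms, example */
        cmd->tx_data_timeout = cpu_to_le32(100 * 1000); /* 100 ms, example */
        cmd->num_skip_dtim = 2;                         /* example value */
}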
+
+#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
new file mode 100644
index 00000000000..aa3474d0823
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -0,0 +1,312 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_rs_h__
+#define __fw_api_rs_h__
+
+#include "fw-api-mac.h"
+
+/*
+ * These serve as indexes into
+ * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
+ */
+enum {
+ IWL_RATE_1M_INDEX = 0,
+ IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+ IWL_RATE_2M_INDEX,
+ IWL_RATE_5M_INDEX,
+ IWL_RATE_11M_INDEX,
+ IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+ IWL_RATE_6M_INDEX,
+ IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+ IWL_RATE_9M_INDEX,
+ IWL_RATE_12M_INDEX,
+ IWL_RATE_18M_INDEX,
+ IWL_RATE_24M_INDEX,
+ IWL_RATE_36M_INDEX,
+ IWL_RATE_48M_INDEX,
+ IWL_RATE_54M_INDEX,
+ IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
+ IWL_RATE_60M_INDEX,
+ IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+ IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
+ IWL_RATE_COUNT,
+};
+
+#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
+
+/* fw API values for legacy bit rates, both OFDM and CCK */
+enum {
+ IWL_RATE_6M_PLCP = 13,
+ IWL_RATE_9M_PLCP = 15,
+ IWL_RATE_12M_PLCP = 5,
+ IWL_RATE_18M_PLCP = 7,
+ IWL_RATE_24M_PLCP = 9,
+ IWL_RATE_36M_PLCP = 11,
+ IWL_RATE_48M_PLCP = 1,
+ IWL_RATE_54M_PLCP = 3,
+ IWL_RATE_1M_PLCP = 10,
+ IWL_RATE_2M_PLCP = 20,
+ IWL_RATE_5M_PLCP = 55,
+ IWL_RATE_11M_PLCP = 110,
+};
+
+/*
+ * rate_n_flags bit fields
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats: HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ * High-throughput (HT) rate format
+ * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Very High-throughput (VHT) rate format
+ * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
+ * Legacy OFDM rate format for bits 7:0
+ * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
+ */
+
+/* Bit 8: (1) HT format, (0) legacy or VHT format */
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS)
+
+/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS 9
+#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS)
+
+/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
+#define RATE_MCS_VHT_POS 26
+#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS)
+
+
+/*
+ * High-throughput (HT) rate format for bits 7:0
+ *
+ * 2-0: MCS rate base
+ * 0) 6 Mbps
+ * 1) 12 Mbps
+ * 2) 18 Mbps
+ * 3) 24 Mbps
+ * 4) 36 Mbps
+ * 5) 48 Mbps
+ * 6) 54 Mbps
+ * 7) 60 Mbps
+ * 4-3: 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ * (bits 7-6 are zero)
+ *
+ * Together the low 5 bits work out to the MCS index because we don't
+ * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
+ * streams and 16-23 have three streams. We could also support MCS 32
+ * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
+ */
+#define RATE_HT_MCS_RATE_CODE_MSK 0x7
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_HT_MCS_GF_POS 10
+#define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS)
+
+#define RATE_HT_MCS_INDEX_MSK 0x3f
+
+/*
+ * Very High-throughput (VHT) rate format for bits 7:0
+ *
+ * 3-0: VHT MCS (0-9)
+ * 5-4: number of streams - 1:
+ * 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ */
+
+/* Bit 4-5: (0) SISO, (1) MIMO2, (2) MIMO3 */
+#define RATE_VHT_MCS_RATE_CODE_MSK 0xf
+#define RATE_VHT_MCS_NSS_POS 4
+#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS)
+
+/*
+ * Legacy OFDM rate format for bits 7:0
+ *
+ * 3-0: 0xD) 6 Mbps
+ * 0xF) 9 Mbps
+ * 0x5) 12 Mbps
+ * 0x7) 18 Mbps
+ * 0x9) 24 Mbps
+ * 0xB) 36 Mbps
+ * 0x1) 48 Mbps
+ * 0x3) 54 Mbps
+ * (bits 7-4 are 0)
+ *
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
+ *
+ * 6-0: 10) 1 Mbps
+ * 20) 2 Mbps
+ * 55) 5.5 Mbps
+ * 110) 11 Mbps
+ * (bit 7 is 0)
+ */
+#define RATE_LEGACY_RATE_MSK 0xff
+
+
+/*
+ * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
+ * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
+ */
+#define RATE_MCS_CHAN_WIDTH_POS 11
+#define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS 13
+#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS)
+
+/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */
+#define RATE_MCS_ANT_POS 14
+#define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \
+ RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \
+ RATE_MCS_ANT_C_MSK)
+#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK
+#define RATE_MCS_ANT_NUM 3
+
+/* Bit 17-18: (0) SS, (1) SS*2 */
+#define RATE_MCS_STBC_POS 17
+#define RATE_MCS_STBC_MSK (1 << RATE_MCS_STBC_POS)
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
+#define RATE_MCS_BF_POS 19
+#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
+
+/* Bit 20: (0) ZLF is off, (1) ZLF is on */
+#define RATE_MCS_ZLF_POS 20
+#define RATE_MCS_ZLF_MSK (1 << RATE_MCS_ZLF_POS)
+
+/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */
+#define RATE_MCS_DUP_POS 24
+#define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS)
+
+/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
+#define RATE_MCS_LDPC_POS 27
+#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
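To make the bit layout above concrete, here is an editor's sketch (not part of the patch) that composes a rate_n_flags word for an HT rate and checks the CCK case, using only the masks defined in this header; the function names are illustrative:

static u32 example_ht_rate_n_flags(void)
{
        /* HT MCS 7 (single stream), 40 MHz, short GI, antenna A */
        return RATE_MCS_HT_MSK |
               (7 & RATE_HT_MCS_INDEX_MSK) |
               RATE_MCS_CHAN_WIDTH_40 |
               RATE_MCS_SGI_MSK |
               RATE_MCS_ANT_A_MSK;
}

static bool example_is_cck(u32 rate_n_flags)
{
        /* the CCK bit is only meaningful when neither HT nor VHT is set */
        return !(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)) &&
               (rate_n_flags & RATE_MCS_CCK_MSK);
}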
+
+
+/* Link Quality definitions */
+
+/* # entries in rate scale table to support Tx retries */
+#define LQ_MAX_RETRY_NUM 16
+
+/* Link quality command flags, only this one is available */
+#define LQ_FLAG_SET_STA_TLC_RTS_MSK BIT(0)
+
+/**
+ * struct iwl_lq_cmd - link quality command
+ * @sta_id: station to update
+ * @control: not used
+ * @flags: combination of LQ_FLAG_*
+ * @mimo_delim: the first SISO index in rs_table, which separates MIMO
+ * and SISO rates
+ * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD).
+ * Should be ANT_[ABC]
+ * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC]
+ * @initial_rate_index: first index from rs_table per AC category
+ * @agg_time_limit: aggregation max time threshold in usec/100, meaning
+ * value of 100 is one usec. Range is 100 to 8000
+ * @agg_disable_start_th: try-count threshold for starting aggregation.
+ * If a frame has higher try-count, it should not be selected for
+ * starting an aggregation sequence.
+ * @agg_frame_cnt_limit: max frame count in an aggregation.
+ * 0: no limit
+ * 1: no aggregation (one frame per aggregation)
+ * 2 - 0x3f: maximal number of frames (up to 3f == 63)
+ * @rs_table: array of rates for each TX try, each is rate_n_flags,
+ * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP
+ * @bf_params: beam forming params, currently not used
+ */
+struct iwl_lq_cmd {
+ u8 sta_id;
+ u8 reserved1;
+ u16 control;
+ /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
+ u8 flags;
+ u8 mimo_delim;
+ u8 single_stream_ant_msk;
+ u8 dual_stream_ant_msk;
+ u8 initial_rate_index[AC_NUM];
+ /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */
+ __le16 agg_time_limit;
+ u8 agg_disable_start_th;
+ u8 agg_frame_cnt_limit;
+ __le32 reserved2;
+ __le32 rs_table[LQ_MAX_RETRY_NUM];
+ __le32 bf_params;
+}; /* LINK_QUALITY_CMD_API_S_VER_1 */
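Since rs_table entries are documented above as combinations of RATE_MCS_* and IWL_RATE_*_PLCP values, the following editor's sketch (not part of the patch) fills a trivial link quality command that uses the same legacy rate for every Tx attempt; the aggregation numbers are example values within the documented ranges:

static void example_fill_lq(struct iwl_lq_cmd *lq, u8 sta_id)
{
        int i;
        /* legacy OFDM 24 Mbps on antenna A for every Tx attempt */
        u32 rate = IWL_RATE_24M_PLCP | RATE_MCS_ANT_A_MSK;

        memset(lq, 0, sizeof(*lq));
        lq->sta_id = sta_id;
        lq->agg_time_limit = cpu_to_le16(4000); /* 40 usec, in usec/100 units */
        lq->agg_frame_cnt_limit = 63;           /* documented maximum */
        for (i = 0; i < LQ_MAX_RETRY_NUM; i++)
                lq->rs_table[i] = cpu_to_le32(rate);
}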
+#endif /* __fw_api_rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
new file mode 100644
index 00000000000..670ac8f95e2
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -0,0 +1,561 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_scan_h__
+#define __fw_api_scan_h__
+
+#include "fw-api.h"
+
+/* Scan Commands, Responses, Notifications */
+
+/* Masks for iwl_scan_channel.type flags */
+#define SCAN_CHANNEL_TYPE_PASSIVE 0
+#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
+#define SCAN_CHANNEL_NARROW_BAND BIT(22)
+
+/* Max number of IEs for direct SSID scans in a command */
+#define PROBE_OPTION_MAX 20
+
+/**
+ * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ * @channel: band is selected by iwl_scan_cmd "flags" field
+ * @tx_gain: gain for analog radio
+ * @dsp_atten: gain for DSP
+ * @active_dwell: dwell time for active scan in TU, typically 5-50
+ * @passive_dwell: dwell time for passive scan in TU, typically 20-500
+ * @type: type is broken down to these bits:
+ * bit 0: 0 = passive, 1 = active
+ * bits 1-20: SSID direct bit map. If any of these bits is set then
+ * the corresponding SSID IE is transmitted in probe request
+ * (bit i adds IE in position i to the probe request)
+ * bit 22: channel width, 0 = regular, 1 = TGj narrow channel
+ *
+ * @iteration_count:
+ * @iteration_interval:
+ * This struct is used once for each channel in the scan list.
+ * Each channel can independently select:
+ * 1) SSID for directed active scans
+ * 2) Txpower setting (for rate specified within Tx command)
+ * 3) How long to stay on-channel (behavior may be modified by quiet_time,
+ * quiet_plcp_th, good_CRC_th)
+ *
+ * To avoid uCode errors, make sure the following are true (see comments
+ * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * 1) If using passive_dwell (i.e. passive_dwell != 0):
+ * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
+ * 2) quiet_time <= active_dwell
+ * 3) If restricting off-channel time (i.e. max_out_time !=0):
+ * passive_dwell < max_out_time
+ * active_dwell < max_out_time
+ */
+struct iwl_scan_channel {
+ __le32 type;
+ __le16 channel;
+ __le16 iteration_count;
+ __le32 iteration_interval;
+ __le16 active_dwell;
+ __le16 passive_dwell;
+} __packed; /* SCAN_CHANNEL_CONTROL_API_S_VER_1 */
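Given the bit breakdown of the type field documented above, this editor's sketch (not part of the patch) fills one channel entry for an active, directed scan; the channel number, dwell times and SSID bit are example values:

static void example_fill_scan_channel(struct iwl_scan_channel *chan)
{
        /* active scan, transmit the direct-scan SSID at bitmap position 1 */
        chan->type = cpu_to_le32(SCAN_CHANNEL_TYPE_ACTIVE | BIT(1));
        chan->channel = cpu_to_le16(6);
        chan->iteration_count = cpu_to_le16(1);
        chan->iteration_interval = cpu_to_le32(0);
        chan->active_dwell = cpu_to_le16(30);   /* TU, within the 5-50 range */
        chan->passive_dwell = cpu_to_le16(110); /* TU */
}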
+
+/**
+ * struct iwl_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwl_ssid_ie {
+ u8 id;
+ u8 len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+
+/**
+ * enum iwl_scan_flags - masks for scan command flags
+ * @SCAN_FLAGS_PERIODIC_SCAN:
+ * @SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX:
+ * @SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
+ * @SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
+ * @SCAN_FLAGS_FRAGMENTED_SCAN:
+ */
+enum iwl_scan_flags {
+ SCAN_FLAGS_PERIODIC_SCAN = BIT(0),
+ SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX = BIT(1),
+ SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2),
+ SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3),
+ SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4),
+};
+
+/**
+ * enum iwl_scan_type - Scan types for scan command
+ * @SCAN_TYPE_FORCED:
+ * @SCAN_TYPE_BACKGROUND:
+ * @SCAN_TYPE_OS:
+ * @SCAN_TYPE_ROAMING:
+ * @SCAN_TYPE_ACTION:
+ * @SCAN_TYPE_DISCOVERY:
+ * @SCAN_TYPE_DISCOVERY_FORCED:
+ */
+enum iwl_scan_type {
+ SCAN_TYPE_FORCED = 0,
+ SCAN_TYPE_BACKGROUND = 1,
+ SCAN_TYPE_OS = 2,
+ SCAN_TYPE_ROAMING = 3,
+ SCAN_TYPE_ACTION = 4,
+ SCAN_TYPE_DISCOVERY = 5,
+ SCAN_TYPE_DISCOVERY_FORCED = 6,
+}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
+
+/* Maximal number of channels to scan */
+#define MAX_NUM_SCAN_CHANNELS 0x24
+
+/**
+ * struct iwl_scan_cmd - scan request command
+ * ( SCAN_REQUEST_CMD = 0x80 )
+ * @len: command length in bytes
+ * @scan_flags: scan flags from SCAN_FLAGS_*
+ * @channel_count: num of channels in channel list (1 - MAX_NUM_SCAN_CHANNELS)
+ * @quiet_time: in msecs, dwell this time for active scan on quiet channels
+ * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
+ * this number of packets were received, typically 1)
+ * @passive2active: is auto switching from passive to active allowed (0 or 1)
+ * @rxchain_sel_flags: RXON_RX_CHAIN_*
+ * @max_out_time: in usecs, max out of serving channel time
+ * @suspend_time: how long to pause scan when returning to service channel:
+ * bits 0-19: beacon interval in usecs (suspend before executing)
+ * bits 20-23: reserved
+ * bits 24-31: number of beacons (suspend between channels)
+ * @rxon_flags: RXON_FLG_*
+ * @filter_flags: RXON_FILTER_*
+ * @tx_cmd: for active scans (zero for passive), w/o payload,
+ * no RS so specify TX rate
+ * @direct_scan: direct scan SSIDs
+ * @type: one of SCAN_TYPE_*
+ * @repeats: how many times to repeat the scan
+ */
+struct iwl_scan_cmd {
+ __le16 len;
+ u8 scan_flags;
+ u8 channel_count;
+ __le16 quiet_time;
+ __le16 quiet_plcp_th;
+ __le16 passive2active;
+ __le16 rxchain_sel_flags;
+ __le32 max_out_time;
+ __le32 suspend_time;
+ /* RX_ON_FLAGS_API_S_VER_1 */
+ __le32 rxon_flags;
+ __le32 filter_flags;
+ struct iwl_tx_cmd tx_cmd;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 type;
+ __le32 repeats;
+
+ /*
+ * Probe request frame, followed by channel list.
+ *
+ * Size of probe request frame is specified by byte count in tx_cmd.
+ * Channel list follows immediately after probe request frame.
+ * Number of channels in list is specified by channel_count.
+ * Each channel in list is of type:
+ *
+ * struct iwl_scan_channel channels[0];
+ *
+ * NOTE: Only one band of channels can be scanned per pass. You
+ * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+ * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * before requesting another scan.
+ */
+ u8 data[0];
+} __packed; /* SCAN_REQUEST_FIXED_PART_API_S_VER_5 */
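Because the command ends in a variable-length trailer (probe request frame followed by the channel list, as the comment above describes), the following editor's sketch (not part of the patch) shows the size calculation and where the channel list starts; the helper names are illustrative:

static size_t example_scan_cmd_size(size_t probe_req_len, u8 channel_count)
{
        /* fixed part + probe request frame + per-channel entries */
        return sizeof(struct iwl_scan_cmd) +
               probe_req_len +
               channel_count * sizeof(struct iwl_scan_channel);
}

static struct iwl_scan_channel *
example_scan_channel_list(struct iwl_scan_cmd *cmd, size_t probe_req_len)
{
        /* the channel list follows immediately after the probe request */
        return (struct iwl_scan_channel *)(cmd->data + probe_req_len);
}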
+
+/* Response to scan request contains only status with one of these values */
+#define SCAN_RESPONSE_OK 0x1
+#define SCAN_RESPONSE_ERROR 0x2
+
+/*
+ * SCAN_ABORT_CMD = 0x81
+ * When scan abort is requested, the command has no fields except the common
+ * header. The response contains only a status with one of these values.
+ */
+#define SCAN_ABORT_POSSIBLE 0x1
+#define SCAN_ABORT_IGNORED 0x2 /* no pending scans */
+
+/* TODO: complete documentation */
+#define SCAN_OWNER_STATUS 0x1
+#define MEASURE_OWNER_STATUS 0x2
+
+/**
+ * struct iwl_scan_start_notif - notifies start of scan in the device
+ * ( SCAN_START_NOTIFICATION = 0x82 )
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @beacon_timer: structured as follows:
+ * bits 0:19 - beacon interval in usecs
+ * bits 20:23 - reserved (0)
+ * bits 24:31 - number of beacons
+ * @channel: which channel is scanned
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @status: one of *_OWNER_STATUS
+ */
+struct iwl_scan_start_notif {
+ __le32 tsf_low;
+ __le32 tsf_high;
+ __le32 beacon_timer;
+ u8 channel;
+ u8 band;
+ u8 reserved[2];
+ __le32 status;
+} __packed; /* SCAN_START_NTF_API_S_VER_1 */
+
+/* scan results probe_status first bit indicates success */
+#define SCAN_PROBE_STATUS_OK 0
+#define SCAN_PROBE_STATUS_TX_FAILED BIT(0)
+/* error statuses combined with TX_FAILED */
+#define SCAN_PROBE_STATUS_FAIL_TTL BIT(1)
+#define SCAN_PROBE_STATUS_FAIL_BT BIT(2)
+
+/* How many statistics are gathered for each channel */
+#define SCAN_RESULTS_STATISTICS 1
+
+/**
+ * enum iwl_scan_complete_status - status codes for scan complete notifications
+ * @SCAN_COMP_STATUS_OK: scan completed successfully
+ * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
+ * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
+ * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
+ * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
+ * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
+ * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
+ * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
+ * @SCAN_COMP_STATUS_ERR_COEX: medium was lost to WiMax
+ * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
+ * (not an error!)
+ * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition that the
+ * driver asked for
+ * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
+*/
+enum iwl_scan_complete_status {
+ SCAN_COMP_STATUS_OK = 0x1,
+ SCAN_COMP_STATUS_ABORT = 0x2,
+ SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
+ SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
+ SCAN_COMP_STATUS_ERR_PROBE = 0x5,
+ SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
+ SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
+ SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
+ SCAN_COMP_STATUS_ERR_COEX = 0x9,
+ SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
+ SCAN_COMP_STATUS_ITERATION_END = 0x0B,
+ SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
+};
+
+/**
+ * struct iwl_scan_results_notif - scan results for one channel
+ * ( SCAN_RESULTS_NOTIFICATION = 0x83 )
+ * @channel: which channel the results are from
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
+ * @num_probe_not_sent: # of requests that weren't sent because there was
+ * not enough time
+ * @duration: duration spent in channel, in usecs
+ * @statistics: statistics gathered for this channel
+ */
+struct iwl_scan_results_notif {
+ u8 channel;
+ u8 band;
+ u8 probe_status;
+ u8 num_probe_not_sent;
+ __le32 duration;
+ __le32 statistics[SCAN_RESULTS_STATISTICS];
+} __packed; /* SCAN_RESULT_NTF_API_S_VER_2 */
+
+/**
+ * struct iwl_scan_complete_notif - notifies end of scanning (all channels)
+ * ( SCAN_COMPLETE_NOTIFICATION = 0x84 )
+ * @scanned_channels: number of channels scanned (and number of valid results)
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: all scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_scan_complete_notif {
+ u8 scanned_channels;
+ u8 status;
+ u8 bt_status;
+ u8 last_channel;
+ __le32 tsf_low;
+ __le32 tsf_high;
+ struct iwl_scan_results_notif results[MAX_NUM_SCAN_CHANNELS];
+} __packed; /* SCAN_COMPLETE_NTF_API_S_VER_2 */
+
+/* scan offload */
+#define IWL_MAX_SCAN_CHANNELS 40
+#define IWL_SCAN_MAX_BLACKLIST_LEN 64
+#define IWL_SCAN_MAX_PROFILES 11
+#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
+
+/* Default watchdog (in MS) for scheduled scan iteration */
+#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
+
+#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define CAN_ABORT_STATUS 1
+
+#define IWL_FULL_SCAN_MULTIPLIER 5
+#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
+
+/**
+ * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
+ * @scan_flags: see enum iwl_scan_flags
+ * @channel_count: channels in channel list
+ * @quiet_time: dwell time, in milliseconds, on quiet channel
+ * @quiet_plcp_th: quiet channel num of packets threshold
+ * @good_CRC_th: passive to active promotion threshold
+ * @rx_chain: RXON rx chain.
+ * @max_out_time: max time, in usec, to be out of the associated channel
+ * @suspend_time: pause scan this long when returning to service channel
+ * @flags: RXON flags
+ * @filter_flags: RXON filter flags
+ * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
+ * @direct_scan: list of SSIDs for directed active scan
+ * @scan_type: see enum iwl_scan_type.
+ * @rep_count: repetition count for each scheduled scan iteration.
+ */
+struct iwl_scan_offload_cmd {
+ __le16 len;
+ u8 scan_flags;
+ u8 channel_count;
+ __le16 quiet_time;
+ __le16 quiet_plcp_th;
+ __le16 good_CRC_th;
+ __le16 rx_chain;
+ __le32 max_out_time;
+ __le32 suspend_time;
+ /* RX_ON_FLAGS_API_S_VER_1 */
+ __le32 flags;
+ __le32 filter_flags;
+ struct iwl_tx_cmd tx_cmd[2];
+ /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 scan_type;
+ __le32 rep_count;
+} __packed;
+
+enum iwl_scan_offload_channel_flags {
+ IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE = BIT(0),
+ IWL_SCAN_OFFLOAD_CHANNEL_NARROW = BIT(22),
+ IWL_SCAN_OFFLOAD_CHANNEL_FULL = BIT(24),
+ IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL = BIT(25),
+};
+
+/**
+ * iwl_scan_channel_cfg - SCAN_CHANNEL_CFG_S
+ * @type: bitmap - see enum iwl_scan_offload_channel_flags.
+ * 0: passive (0) or active (1) scan.
+ * 1-20: directed scan to i'th ssid.
+ * 22: channel width configuration - 1 for narrow.
+ * 24: full scan.
+ * 25: partial scan.
+ * @channel_number: channel number 1-13 etc.
+ * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two iterations on one channel.
+ * @dwell_time: entry 0 - active scan, entry 1 - passive scan.
+ */
+struct iwl_scan_channel_cfg {
+ __le32 type[IWL_MAX_SCAN_CHANNELS];
+ __le16 channel_number[IWL_MAX_SCAN_CHANNELS];
+ __le16 iter_count[IWL_MAX_SCAN_CHANNELS];
+ __le32 iter_interval[IWL_MAX_SCAN_CHANNELS];
+ u8 dwell_time[IWL_MAX_SCAN_CHANNELS][2];
+} __packed;
+
+/**
+ * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
+ * @scan_cmd: scan command fixed part
+ * @channel_cfg: scan channel configuration
+ * @data: probe request frames (one per band)
+ */
+struct iwl_scan_offload_cfg {
+ struct iwl_scan_offload_cmd scan_cmd;
+ struct iwl_scan_channel_cfg channel_cfg;
+ u8 data[0];
+} __packed;
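As a hedged editor's sketch (not part of the patch) of the trailing data[] buffer: the doc above says one probe request frame per band and tx_cmd[2] is split between 2.4 and 5 GHz, so a plausible allocation reserves SCAN_OFFLOAD_PROBE_REQ_SIZE per band; that per-band sizing is an assumption, not something this header states:

static size_t example_scan_offload_cfg_size(void)
{
        /* assumed: one probe request template per band, each allotted
         * SCAN_OFFLOAD_PROBE_REQ_SIZE bytes
         */
        return sizeof(struct iwl_scan_offload_cfg) +
               2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
}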
+
+/**
+ * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
+ * @ssid: MAC address to filter out
+ * @reported_rssi: AP rssi reported to the host
+ */
+struct iwl_scan_offload_blacklist {
+ u8 ssid[ETH_ALEN];
+ u8 reported_rssi;
+ u8 reserved;
+} __packed;
+
+enum iwl_scan_offload_network_type {
+ IWL_NETWORK_TYPE_BSS = 1,
+ IWL_NETWORK_TYPE_IBSS = 2,
+ IWL_NETWORK_TYPE_ANY = 3,
+};
+
+enum iwl_scan_offload_band_selection {
+ IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4,
+ IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8,
+ IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc,
+};
+
+/**
+ * iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
+ * @ssid_index: index to ssid list in fixed part
+ * @unicast_cipher: encryption algorithm to match - bitmap
+ * @auth_alg: authentication algorithm to match - bitmap
+ * @network_type: enum iwl_scan_offload_network_type
+ * @band_selection: enum iwl_scan_offload_band_selection
+ */
+struct iwl_scan_offload_profile {
+ u8 ssid_index;
+ u8 unicast_cipher;
+ u8 auth_alg;
+ u8 network_type;
+ u8 band_selection;
+ u8 reserved[3];
+} __packed;
+
+/**
+ * iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
+ * @blacklist: AP list to filter out of scan results
+ * @profiles: profiles to search for match
+ * @blacklist_len: length of blacklist
+ * @num_profiles: num of profiles in the list
+ */
+struct iwl_scan_offload_profile_cfg {
+ struct iwl_scan_offload_blacklist blacklist[IWL_SCAN_MAX_BLACKLIST_LEN];
+ struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
+ u8 blacklist_len;
+ u8 num_profiles;
+ u8 reserved[2];
+} __packed;
+
+/**
+ * iwl_scan_offload_schedule - schedule of scan offload
+ * @delay: delay between iterations, in seconds.
+ * @iterations: num of scan iterations
+ * @full_scan_mul: number of partial scans before each full scan
+ */
+struct iwl_scan_offload_schedule {
+ u16 delay;
+ u8 iterations;
+ u8 full_scan_mul;
+} __packed;
+
+/*
+ * iwl_scan_offload_flags
+ *
+ * IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID: filter mode - upload every beacon or match
+ * ssid list.
+ * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
+ * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
+ * on A band.
+ */
+enum iwl_scan_offload_flags {
+ IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID = BIT(0),
+ IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
+ IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3),
+};
+
+/**
+ * iwl_scan_offload_req - scan offload request command
+ * @flags: bitmap - enum iwl_scan_offload_flags.
+ * @watchdog: maximum scan duration in TU.
+ * @delay: delay in seconds before first iteration.
+ * @schedule_line: scan offload schedule, for fast and regular scan.
+ */
+struct iwl_scan_offload_req {
+ __le16 flags;
+ __le16 watchdog;
+ __le16 delay;
+ __le16 reserved;
+ struct iwl_scan_offload_schedule schedule_line[2];
+} __packed;
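To show how the two schedule lines (fast and regular, per the doc above) might be filled, here is an editor's sketch, not part of the patch; the iteration count for the second line and the flag choice are illustrative, and their exact semantics are not documented in this header:

static void example_fill_sched_scan_req(struct iwl_scan_offload_req *req,
                                        u16 interval_secs)
{
        memset(req, 0, sizeof(*req));
        req->flags = cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID);
        req->watchdog = IWL_SCHED_SCAN_WATCHDOG;
        req->delay = cpu_to_le16(0);

        /* fast line: a few quick full-scan iterations first */
        req->schedule_line[0].delay = interval_secs;
        req->schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
        req->schedule_line[0].full_scan_mul = 1;

        /* regular line: keep iterating (0xff chosen for illustration),
         * with a full scan every IWL_FULL_SCAN_MULTIPLIER partial scans
         */
        req->schedule_line[1].delay = interval_secs;
        req->schedule_line[1].iterations = 0xff;
        req->schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
}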
+
+enum iwl_scan_offload_compleate_status {
+ IWL_SCAN_OFFLOAD_COMPLETED = 1,
+ IWL_SCAN_OFFLOAD_ABORTED = 2,
+};
+
+/**
+ * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
+ * @last_schedule_line: last schedule line executed (fast or regular)
+ * @last_schedule_iteration: last scan iteration executed before scan abort
+ * @status: enum iwl_scan_offload_compleate_status
+ */
+struct iwl_scan_offload_complete {
+ u8 last_schedule_line;
+ u8 last_schedule_iteration;
+ u8 status;
+ u8 reserved;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
new file mode 100644
index 00000000000..0acb53dda22
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -0,0 +1,380 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_sta_h__
+#define __fw_api_sta_h__
+
+/**
+ * enum iwl_sta_flags - flags for the ADD_STA host command
+ * @STA_FLG_REDUCED_TX_PWR_CTRL:
+ * @STA_FLG_REDUCED_TX_PWR_DATA:
+ * @STA_FLG_FLG_ANT_MSK: Antenna selection
+ * @STA_FLG_PS: set if STA is in Power Save
+ * @STA_FLG_INVALID: set if STA is invalid
+ * @STA_FLG_DLP_EN: Direct Link Protocol is enabled
+ * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs
+ * @STA_FLG_DRAIN_FLOW: drain flow
+ * @STA_FLG_PAN: STA is for PAN interface
+ * @STA_FLG_CLASS_AUTH:
+ * @STA_FLG_CLASS_ASSOC:
+ * @STA_FLG_CLASS_MIMO_PROT:
+ * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU
+ * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
+ * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
+ * initialised by driver and can be updated by fw upon reception of
+ * action frames that can change the channel width. When cleared the fw
+ * will send all the frames in 20MHz even when FAT channel is requested.
+ * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
+ * driver and can be updated by fw upon reception of action frames.
+ * @STA_FLG_MFP_EN: Management Frame Protection
+ */
+enum iwl_sta_flags {
+ STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3),
+ STA_FLG_REDUCED_TX_PWR_DATA = BIT(6),
+
+ STA_FLG_FLG_ANT_A = (1 << 4),
+ STA_FLG_FLG_ANT_B = (2 << 4),
+ STA_FLG_FLG_ANT_MSK = (STA_FLG_FLG_ANT_A |
+ STA_FLG_FLG_ANT_B),
+
+ STA_FLG_PS = BIT(8),
+ STA_FLG_INVALID = BIT(9),
+ STA_FLG_DLP_EN = BIT(10),
+ STA_FLG_SET_ALL_KEYS = BIT(11),
+ STA_FLG_DRAIN_FLOW = BIT(12),
+ STA_FLG_PAN = BIT(13),
+ STA_FLG_CLASS_AUTH = BIT(14),
+ STA_FLG_CLASS_ASSOC = BIT(15),
+ STA_FLG_RTS_MIMO_PROT = BIT(17),
+
+ STA_FLG_MAX_AGG_SIZE_SHIFT = 19,
+ STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+
+ STA_FLG_AGG_MPDU_DENS_SHIFT = 23,
+ STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+
+ STA_FLG_FAT_EN_20MHZ = (0 << 26),
+ STA_FLG_FAT_EN_40MHZ = (1 << 26),
+ STA_FLG_FAT_EN_80MHZ = (2 << 26),
+ STA_FLG_FAT_EN_160MHZ = (3 << 26),
+ STA_FLG_FAT_EN_MSK = (3 << 26),
+
+ STA_FLG_MIMO_EN_SISO = (0 << 28),
+ STA_FLG_MIMO_EN_MIMO2 = (1 << 28),
+ STA_FLG_MIMO_EN_MIMO3 = (2 << 28),
+ STA_FLG_MIMO_EN_MSK = (3 << 28),
+};
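As a small editor's sketch (not part of the patch), the multi-bit fields above compose into station_flags like this for an HT40, 2x2 MIMO peer; the function name is illustrative:

static u32 example_sta_flags_ht40_mimo2(void)
{
        /* 40 MHz capable, 2x2 MIMO, 64K max A-MPDU, 4 usec MPDU density */
        return STA_FLG_FAT_EN_40MHZ |
               STA_FLG_MIMO_EN_MIMO2 |
               STA_FLG_MAX_AGG_SIZE_64K |
               STA_FLG_AGG_MPDU_DENS_4US;
}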
+
+/**
+ * enum iwl_sta_key_flag - key flags for the ADD_STA host command
+ * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm
+ * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
+ * station info array (1 - n 1X mode)
+ * @STA_KEY_FLG_KEYID_MSK: the index of the key
+ * @STA_KEY_NOT_VALID: key is invalid
+ * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
+ * @STA_KEY_MULTICAST: set for multicast key
+ * @STA_KEY_MFP: key is used for Management Frame Protection
+ */
+enum iwl_sta_key_flag {
+ STA_KEY_FLG_NO_ENC = (0 << 0),
+ STA_KEY_FLG_WEP = (1 << 0),
+ STA_KEY_FLG_CCM = (2 << 0),
+ STA_KEY_FLG_TKIP = (3 << 0),
+ STA_KEY_FLG_CMAC = (6 << 0),
+ STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
+ STA_KEY_FLG_EN_MSK = (7 << 0),
+
+ STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
+ STA_KEY_FLG_KEYID_POS = 8,
+ STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
+ STA_KEY_NOT_VALID = BIT(11),
+ STA_KEY_FLG_WEP_13BYTES = BIT(12),
+ STA_KEY_MULTICAST = BIT(14),
+ STA_KEY_MFP = BIT(15),
+};
+
+/**
+ * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
+ * @STA_MODIFY_KEY: this command modifies %key
+ * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
+ * @STA_MODIFY_TX_RATE: unused
+ * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
+ * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
+ * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
+ * @STA_MODIFY_PROT_TH:
+ * @STA_MODIFY_QUEUES: modify the queues used by this station
+ */
+enum iwl_sta_modify_flag {
+ STA_MODIFY_KEY = BIT(0),
+ STA_MODIFY_TID_DISABLE_TX = BIT(1),
+ STA_MODIFY_TX_RATE = BIT(2),
+ STA_MODIFY_ADD_BA_TID = BIT(3),
+ STA_MODIFY_REMOVE_BA_TID = BIT(4),
+ STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5),
+ STA_MODIFY_PROT_TH = BIT(6),
+ STA_MODIFY_QUEUES = BIT(7),
+};
+
+#define STA_MODE_MODIFY 1
+
+/**
+ * enum iwl_sta_sleep_flag - type of sleep of the station
+ * @STA_SLEEP_STATE_AWAKE:
+ * @STA_SLEEP_STATE_PS_POLL:
+ * @STA_SLEEP_STATE_UAPSD:
+ */
+enum iwl_sta_sleep_flag {
+ STA_SLEEP_STATE_AWAKE = 0,
+ STA_SLEEP_STATE_PS_POLL = BIT(0),
+ STA_SLEEP_STATE_UAPSD = BIT(1),
+};
+
+/* STA ID and color bits definitions */
+#define STA_ID_SEED (0x0f)
+#define STA_ID_POS (0)
+#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS)
+
+#define STA_COLOR_SEED (0x7)
+#define STA_COLOR_POS (4)
+#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS)
+
+#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \
+ (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
+#define STA_ID_N_COLOR_GET_ID(id_n_color) \
+ (((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
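For completeness, an editor's sketch (not part of the patch) of the inverse packing that these GET macros undo; the helper name is illustrative:

static u32 example_sta_id_n_color(u8 sta_id, u8 color)
{
        return ((sta_id << STA_ID_POS) & STA_ID_MSK) |
               ((color << STA_COLOR_POS) & STA_COLOR_MSK);
}
/* STA_ID_N_COLOR_GET_ID()/STA_ID_N_COLOR_GET_COLOR() reverse this packing. */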
+
+#define STA_KEY_MAX_NUM (16)
+#define STA_KEY_IDX_INVALID (0xff)
+#define STA_KEY_MAX_DATA_KEY_NUM (4)
+#define IWL_MAX_GLOBAL_KEYS (4)
+#define STA_KEY_LEN_WEP40 (5)
+#define STA_KEY_LEN_WEP104 (13)
+
+/**
+ * struct iwl_mvm_keyinfo - key information
+ * @key_flags: type %iwl_sta_key_flag
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ * @key_offset: key offset in the fw's key table
+ * @key: 16-byte unicast decryption key
+ * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
+ * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
+ * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
+ */
+struct iwl_mvm_keyinfo {
+ __le16 key_flags;
+ u8 tkip_rx_tsc_byte2;
+ u8 reserved1;
+ __le16 tkip_rx_ttak[5];
+ u8 key_offset;
+ u8 reserved2;
+ u8 key[16];
+ __le64 tx_secur_seq_cnt;
+ __le64 hw_tkip_mic_rx_key;
+ __le64 hw_tkip_mic_tx_key;
+} __packed;
+
+/**
+ * struct iwl_mvm_add_sta_cmd - Add / modify a station in the fw's station table
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: 1: modify existing, 0: add new station
+ * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
+ * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
+ * sent
+ * @mac_id_n_color: the Mac context this station belongs to
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
+ * alone. 1 - modify, 0 - don't change.
+ * @key: look at %iwl_mvm_keyinfo
+ * @station_flags: look at %iwl_sta_flags
+ * @station_flags_msk: which of %station_flags have changed
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ * add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ * Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ * add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ * asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ * keeps track of STA sleep state.
+ * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ * mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwl_mvm_add_sta_cmd {
+ u8 add_modify;
+ u8 unicast_tx_key_id;
+ u8 multicast_tx_key_id;
+ u8 reserved1;
+ __le32 mac_id_n_color;
+ u8 addr[ETH_ALEN];
+ __le16 reserved2;
+ u8 sta_id;
+ u8 modify_mask;
+ __le16 reserved3;
+ struct iwl_mvm_keyinfo key;
+ __le32 station_flags;
+ __le32 station_flags_msk;
+ __le16 tid_disable_tx;
+ __le16 reserved4;
+ u8 add_immediate_ba_tid;
+ u8 remove_immediate_ba_tid;
+ __le16 add_immediate_ba_ssn;
+ __le16 sleep_tx_count;
+ __le16 sleep_state_flags;
+ __le16 assoc_id;
+ __le16 beamform_flags;
+ __le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_5 */
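Putting the fields above together, this editor's sketch (not part of the patch) adds a new HT40 station; the flag choices are examples, and per the tid_disable_tx doc a set bit disables Tx aggregation for that TID:

static void example_add_new_sta(struct iwl_mvm_add_sta_cmd *cmd,
                                u32 mac_id_n_color, u8 sta_id,
                                const u8 *addr, u32 tfd_queue_msk)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->add_modify = 0;                    /* 0 = add a new station */
        cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
        memcpy(cmd->addr, addr, ETH_ALEN);
        cmd->sta_id = sta_id;
        cmd->station_flags = cpu_to_le32(STA_FLG_FAT_EN_40MHZ |
                                         STA_FLG_MIMO_EN_MIMO2);
        cmd->station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
                                             STA_FLG_MIMO_EN_MSK);
        /* start with Tx aggregation disabled on all TIDs */
        cmd->tid_disable_tx = cpu_to_le16(0xffff);
        cmd->tfd_queue_msk = cpu_to_le32(tfd_queue_msk);
}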
+
+/**
+ * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
+ * @ADD_STA_SUCCESS: operation was executed successfully
+ * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
+ * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
+ * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that
+ * doesn't exist.
+ */
+enum iwl_mvm_add_sta_rsp_status {
+ ADD_STA_SUCCESS = 0x1,
+ ADD_STA_STATIONS_OVERLOAD = 0x2,
+ ADD_STA_IMMEDIATE_BA_FAILURE = 0x4,
+ ADD_STA_MODIFY_NON_EXISTING_STA = 0x8,
+};
+
+/**
+ * struct iwl_mvm_rm_sta_cmd - Add / modify a station in the fw's station table
+ * ( REMOVE_STA = 0x19 )
+ * @sta_id: the station id of the station to be removed
+ */
+struct iwl_mvm_rm_sta_cmd {
+ u8 sta_id;
+ u8 reserved[3];
+} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwl_sta_key_flag
+ * @IGTK:
+ * @K1: IGTK master key
+ * @K2: IGTK sub key
+ * @sta_id: station ID that supports IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd {
+ __le32 ctrl_flags;
+ u8 IGTK[16];
+ u8 K1[16];
+ u8 K2[16];
+ __le32 key_id;
+ __le32 sta_id;
+ __le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+struct iwl_mvm_wep_key {
+ u8 key_index;
+ u8 key_offset;
+ __le16 reserved1;
+ u8 key_size;
+ u8 reserved2[3];
+ u8 key[16];
+} __packed;
+
+struct iwl_mvm_wep_key_cmd {
+ __le32 mac_id_n_color;
+ u8 num_keys;
+ u8 decryption_type;
+ u8 flags;
+ u8 reserved;
+ struct iwl_mvm_wep_key wep_key[0];
+} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+
+
+#endif /* __fw_api_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
new file mode 100644
index 00000000000..2677914bf0a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -0,0 +1,580 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_tx_h__
+#define __fw_api_tx_h__
+
+/**
+ * enum iwl_tx_flags - bitmasks for tx_flags in TX command
+ * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame
+ * @TX_CMD_FLG_ACK: expect ACK from receiving station
+ * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
+ * Otherwise, use rate_n_flags from the TX command
+ * @TX_CMD_FLG_BA: this frame is a block ack
+ * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
+ * Must set TX_CMD_FLG_ACK with this flag.
+ * @TX_CMD_FLG_TXOP_PROT: protect frame with full TXOP protection
+ * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
+ * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
+ * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
+ * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
+ * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
+ * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU
+ * @TX_CMD_FLG_NEXT_FRAME: this frame includes information of the next frame
+ * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame
+ * Should be set for beacons and probe responses
+ * @TX_CMD_FLG_CALIB: activate PA TX power calibrations
+ * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count
+ * @TX_CMD_FLG_AGG_START: allow this frame to start aggregation
+ * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
+ * Should be set for 26/30 length MAC headers
+ * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
+ * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
+ * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
+ * @TX_CMD_FLG_CTS_ONLY: send CTS only, no data after that
+ * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
+ * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
+ * @TX_CMD_FLG_EXEC_PAPD: execute PAPD
+ * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power
+ * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk
+ */
+enum iwl_tx_flags {
+ TX_CMD_FLG_PROT_REQUIRE = BIT(0),
+ TX_CMD_FLG_ACK = BIT(3),
+ TX_CMD_FLG_STA_RATE = BIT(4),
+ TX_CMD_FLG_BA = BIT(5),
+ TX_CMD_FLG_BAR = BIT(6),
+ TX_CMD_FLG_TXOP_PROT = BIT(7),
+ TX_CMD_FLG_VHT_NDPA = BIT(8),
+ TX_CMD_FLG_HT_NDPA = BIT(9),
+ TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
+ TX_CMD_FLG_BT_DIS = BIT(12),
+ TX_CMD_FLG_SEQ_CTL = BIT(13),
+ TX_CMD_FLG_MORE_FRAG = BIT(14),
+ TX_CMD_FLG_NEXT_FRAME = BIT(15),
+ TX_CMD_FLG_TSF = BIT(16),
+ TX_CMD_FLG_CALIB = BIT(17),
+ TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18),
+ TX_CMD_FLG_AGG_START = BIT(19),
+ TX_CMD_FLG_MH_PAD = BIT(20),
+ TX_CMD_FLG_RESP_TO_DRV = BIT(21),
+ TX_CMD_FLG_CCMP_AGG = BIT(22),
+ TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
+ TX_CMD_FLG_CTS_ONLY = BIT(24),
+ TX_CMD_FLG_DUR = BIT(25),
+ TX_CMD_FLG_FW_DROP = BIT(26),
+ TX_CMD_FLG_EXEC_PAPD = BIT(27),
+ TX_CMD_FLG_PAPD_TYPE = BIT(28),
+ TX_CMD_FLG_HCCA_CHUNK = BIT(31)
+}; /* TX_FLAGS_BITS_API_S_VER_1 */
+
+/*
+ * TX command security control
+ */
+#define TX_CMD_SEC_WEP 0x01
+#define TX_CMD_SEC_CCM 0x02
+#define TX_CMD_SEC_TKIP 0x03
+#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
+#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
+#define TX_CMD_SEC_KEY128 0x08
+
+/* TODO: how are these values OK with only a 16-bit variable??? */
+/*
+ * TX command next frame info
+ *
+ * bits 0:2 - security control (TX_CMD_SEC_*)
+ * bit 3 - immediate ACK required
+ * bit 4 - rate is taken from STA table
+ * bit 5 - frame belongs to BA stream
+ * bit 6 - immediate BA response expected
+ * bit 7 - unused
+ * bits 8:15 - Station ID
+ * bits 16:31 - rate
+ */
+#define TX_CMD_NEXT_FRAME_ACK_MSK (0x8)
+#define TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)
+#define TX_CMD_NEXT_FRAME_BA_MSK (0x20)
+#define TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)
+#define TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)
+#define TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)
+#define TX_CMD_NEXT_FRAME_STA_ID_POS (8)
+#define TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)
+#define TX_CMD_NEXT_FRAME_RATE_POS (16)
+
+/*
+ * TX command Frame life time in us - to be written in pm_frame_timeout
+ */
+#define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF
+#define TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms */
+#define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */
+#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0
+
+/*
+ * TID for non QoS frames - to be written in tid_tspec
+ */
+#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
+
+/*
+ * Limits on the retransmissions - to be written in {data,rts}_retry_limit
+ */
+#define IWL_DEFAULT_TX_RETRY 15
+#define IWL_MGMT_DFAULT_RETRY_LIMIT 3
+#define IWL_RTS_DFAULT_RETRY_LIMIT 60
+#define IWL_BAR_DFAULT_RETRY_LIMIT 60
+#define IWL_LOW_RETRY_LIMIT 7
+
+/* TODO: complete documentation for try_cnt and btkill_cnt */
+/**
+ * struct iwl_tx_cmd - TX command struct to FW
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @next_frame_len: same as len, but for next frame (0 if not applicable)
+ * Used for fragmentation and bursting, but not in 11n aggregation.
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE is
+ * cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @sec_ctl: security control, TX_CMD_SEC_*
+ * @initial_rate_index: index into the rate table for initial TX attempt.
+ * Applied if TX_CMD_FLG_STA_RATE is set, normally 0 for data frames.
+ * @key: security key
+ * @next_frame_flags: TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_*
+ * @life_time: frame life time (usecs??)
+ * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
+ * btkill_cnt + reserved), first 32 bits. "0" disables usage.
+ * @dram_msb_ptr: upper bits of the scratch physical address
+ * @rts_retry_limit: max attempts for RTS
+ * @data_retry_limit: max attempts to send the data packet
+ * @tid_spec: TID/tspec
+ * @pm_frame_timeout: PM TX frame timeout
+ * @driver_txop: duration of EDCA TXOP, in 32-usec units. Set this if not
+ * specified by HCCA protocol
+ *
+ * The byte count (both len and next_frame_len) includes MAC header
+ * (24/26/30/32 bytes)
+ * + 2 bytes pad if 26/30 header size
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * It does not include post-MAC padding, i.e.,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range of len: 14-2342 bytes.
+ *
+ * After the struct fields the MAC header is placed, plus any padding,
+ * and then the actual payload.
+ */
+struct iwl_tx_cmd {
+ __le16 len;
+ __le16 next_frame_len;
+ __le32 tx_flags;
+ /* DRAM_SCRATCH_API_U_VER_1 */
+ u8 try_cnt;
+ u8 btkill_cnt;
+ __le16 reserved;
+ __le32 rate_n_flags;
+ u8 sta_id;
+ u8 sec_ctl;
+ u8 initial_rate_index;
+ u8 reserved2;
+ u8 key[16];
+ __le16 next_frame_flags;
+ __le16 reserved3;
+ __le32 life_time;
+ __le32 dram_lsb_ptr;
+ u8 dram_msb_ptr;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
+ u8 tid_tspec;
+ __le16 pm_frame_timeout;
+ __le16 driver_txop;
+ u8 payload[0];
+ struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_3 */
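+
+/*
+ * Example (a minimal, hypothetical sketch; not part of the firmware API):
+ * one plausible way the constants above fit together when filling the most
+ * common fields of a TX command for a simple data frame. The flag, retry
+ * and lifetime choices are illustrative only; tid_tspec would typically be
+ * the frame's TID, or IWL_TID_NON_QOS for non-QoS frames.
+ */
+static inline void iwl_tx_cmd_example_fill(struct iwl_tx_cmd *tx_cmd,
+					   u8 sta_id, u16 payload_len)
+{
+	tx_cmd->len = cpu_to_le16(payload_len);
+	tx_cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_SEQ_CTL);
+	tx_cmd->sta_id = sta_id;
+	tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
+	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
+	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_DEFAULT);
+}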
+
+/*
+ * TX response related data
+ */
+
+/*
+ * enum iwl_tx_status - status that is returned by the fw after attempts to Tx
+ * @TX_STATUS_SUCCESS:
+ * @TX_STATUS_DIRECT_DONE:
+ * @TX_STATUS_POSTPONE_DELAY:
+ * @TX_STATUS_POSTPONE_FEW_BYTES:
+ * @TX_STATUS_POSTPONE_BT_PRIO:
+ * @TX_STATUS_POSTPONE_QUIET_PERIOD:
+ * @TX_STATUS_POSTPONE_CALC_TTAK:
+ * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
+ * @TX_STATUS_FAIL_SHORT_LIMIT:
+ * @TX_STATUS_FAIL_LONG_LIMIT:
+ * @TX_STATUS_FAIL_UNDERRUN:
+ * @TX_STATUS_FAIL_DRAIN_FLOW:
+ * @TX_STATUS_FAIL_RFKILL_FLUSH:
+ * @TX_STATUS_FAIL_LIFE_EXPIRE:
+ * @TX_STATUS_FAIL_DEST_PS:
+ * @TX_STATUS_FAIL_HOST_ABORTED:
+ * @TX_STATUS_FAIL_BT_RETRY:
+ * @TX_STATUS_FAIL_STA_INVALID:
+ * @TX_STATUS_FAIL_FRAG_DROPPED:
+ * @TX_STATUS_FAIL_TID_DISABLE:
+ * @TX_STATUS_FAIL_FIFO_FLUSHED:
+ * @TX_STATUS_FAIL_SMALL_CF_POLL:
+ * @TX_STATUS_FAIL_FW_DROP:
+ * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and
+ * STA table
+ * @TX_STATUS_INTERNAL_ABORT:
+ * @TX_MODE_MSK:
+ * @TX_MODE_NO_BURST:
+ * @TX_MODE_IN_BURST_SEQ:
+ * @TX_MODE_FIRST_IN_BURST:
+ * @TX_QUEUE_NUM_MSK:
+ *
+ * Valid only if frame_count == 1
+ * TODO: complete documentation
+ */
+enum iwl_tx_status {
+ TX_STATUS_MSK = 0x000000ff,
+ TX_STATUS_SUCCESS = 0x01,
+ TX_STATUS_DIRECT_DONE = 0x02,
+ /* postpone TX */
+ TX_STATUS_POSTPONE_DELAY = 0x40,
+ TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+ TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+ TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+ TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+ /* abort TX */
+ TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+ TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+ TX_STATUS_FAIL_UNDERRUN = 0x84,
+ TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+ TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+ TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ TX_STATUS_FAIL_DEST_PS = 0x88,
+ TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+ TX_STATUS_FAIL_BT_RETRY = 0x8a,
+ TX_STATUS_FAIL_STA_INVALID = 0x8b,
+ TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+ TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+ TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f,
+ TX_STATUS_FAIL_FW_DROP = 0x90,
+ TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91,
+ TX_STATUS_INTERNAL_ABORT = 0x92,
+ TX_MODE_MSK = 0x00000f00,
+ TX_MODE_NO_BURST = 0x00000000,
+ TX_MODE_IN_BURST_SEQ = 0x00000100,
+ TX_MODE_FIRST_IN_BURST = 0x00000200,
+ TX_QUEUE_NUM_MSK = 0x0001f000,
+ TX_NARROW_BW_MSK = 0x00060000,
+ TX_NARROW_BW_1DIV2 = 0x00020000,
+ TX_NARROW_BW_1DIV4 = 0x00040000,
+ TX_NARROW_BW_1DIV8 = 0x00060000,
+};
+
+/*
+ * enum iwl_tx_agg_status - TX aggregation status
+ * @AGG_TX_STATE_STATUS_MSK:
+ * @AGG_TX_STATE_TRANSMITTED:
+ * @AGG_TX_STATE_UNDERRUN:
+ * @AGG_TX_STATE_BT_PRIO:
+ * @AGG_TX_STATE_FEW_BYTES:
+ * @AGG_TX_STATE_ABORT:
+ * @AGG_TX_STATE_LAST_SENT_TTL:
+ * @AGG_TX_STATE_LAST_SENT_TRY_CNT:
+ * @AGG_TX_STATE_LAST_SENT_BT_KILL:
+ * @AGG_TX_STATE_SCD_QUERY:
+ * @AGG_TX_STATE_TEST_BAD_CRC32:
+ * @AGG_TX_STATE_RESPONSE:
+ * @AGG_TX_STATE_DUMP_TX:
+ * @AGG_TX_STATE_DELAY_TX:
+ * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
+ * occur if tx failed for this frame when it was a member of a previous
+ * aggregation block). If rate scaling is used, retry count indicates the
+ * rate table entry used for all frames in the new agg.
+ * @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
+ * this frame
+ *
+ * TODO: complete documentation
+ */
+enum iwl_tx_agg_status {
+ AGG_TX_STATE_STATUS_MSK = 0x00fff,
+ AGG_TX_STATE_TRANSMITTED = 0x000,
+ AGG_TX_STATE_UNDERRUN = 0x001,
+ AGG_TX_STATE_BT_PRIO = 0x002,
+ AGG_TX_STATE_FEW_BYTES = 0x004,
+ AGG_TX_STATE_ABORT = 0x008,
+ AGG_TX_STATE_LAST_SENT_TTL = 0x010,
+ AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020,
+ AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040,
+ AGG_TX_STATE_SCD_QUERY = 0x080,
+ AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100,
+ AGG_TX_STATE_RESPONSE = 0x1ff,
+ AGG_TX_STATE_DUMP_TX = 0x200,
+ AGG_TX_STATE_DELAY_TX = 0x400,
+ AGG_TX_STATE_TRY_CNT_POS = 12,
+ AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS,
+};
+
+#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \
+ AGG_TX_STATE_LAST_SENT_TRY_CNT| \
+ AGG_TX_STATE_LAST_SENT_BT_KILL)
+
+/*
+ * The mask below describes a status where we are absolutely sure that the MPDU
+ * wasn't sent. For BA/Underrun we cannot be that sure. All we know is that
+ * we've written the bytes to the TXE, but we know nothing about what the DSP
+ * did.
+ */
+#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \
+ AGG_TX_STATE_ABORT | \
+ AGG_TX_STATE_SCD_QUERY)
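+
+/*
+ * Hypothetical helper (an illustrative sketch, not part of the firmware
+ * API): a per-frame aggregation status checked against the mask above
+ * tells whether the MPDU was certainly not sent.
+ */
+static inline bool iwl_mvm_agg_frame_not_sent(__le16 status)
+{
+	return le16_to_cpu(status) & AGG_TX_STAT_FRAME_NOT_SENT;
+}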
+
+/*
+ * REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1) No aggregation (frame_count == 1). This reports Tx results for a single
+ * frame. Multiple attempts, at various bit rates, may have been made for
+ * this frame.
+ *
+ * 2) Aggregation (frame_count > 1). This reports Tx results for two or more
+ * frames that used block-acknowledge. All frames were transmitted at the
+ * same rate. Rate scaling may have been used if the first frame in this
+ * new agg block failed in previous agg block(s).
+ *
+ * Note that, for aggregation, ACK (block-ack) status is not delivered
+ * here; block-ack has not been received by the time the device records
+ * this status.
+ * This status relates to reasons the tx might have been blocked or aborted
+ * within the device, rather than whether it was received successfully by
+ * the destination station.
+ */
+
+/**
+ * struct agg_tx_status - per packet TX aggregation status
+ * @status: enum iwl_tx_agg_status
+ * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+struct agg_tx_status {
+ __le16 status;
+ __le16 sequence;
+} __packed;
+
+/*
+ * definitions for initial rate index field
+ * bits [3:0] initial rate index
+ * bits [6:4] rate table color, used for the initial rate
+ * bit-7 invalid rate indication
+ */
+#define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
+#define TX_RES_INV_RATE_INDEX_MSK 0x80
+
+#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
+
+/**
+ * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @status: for non-agg: frame status TX_STATUS_*
+ * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
+ * follow this one, up to frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwl_mvm_get_scd_ssn for more details.
+ */
+struct iwl_mvm_tx_resp {
+ u8 frame_count;
+ u8 bt_kill_count;
+ u8 failure_rts;
+ u8 failure_frame;
+ __le32 initial_rate;
+ __le16 wireless_media_time;
+
+ u8 pa_status;
+ u8 pa_integ_res_a[3];
+ u8 pa_integ_res_b[3];
+ u8 pa_integ_res_c[3];
+ __le16 measurement_req_id;
+ __le16 reserved;
+
+ __le32 tfd_info;
+ __le16 seq_ctl;
+ __le16 byte_cnt;
+ u8 tlc_info;
+ u8 ra_tid;
+ __le16 frame_ctrl;
+
+ struct agg_tx_status status;
+} __packed; /* TX_RSP_API_S_VER_3 */
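+
+/*
+ * Hypothetical helper (an illustrative sketch, not part of the firmware
+ * API): pull the station index, TID and first-frame status out of a TX
+ * response using the masks and macros defined above.
+ */
+static inline void iwl_mvm_tx_resp_example_parse(struct iwl_mvm_tx_resp *resp,
+						 u8 *sta_id, u8 *tid,
+						 u16 *tx_status)
+{
+	*sta_id = IWL_MVM_TX_RES_GET_RA(resp->ra_tid);
+	*tid = IWL_MVM_TX_RES_GET_TID(resp->ra_tid);
+	*tx_status = le16_to_cpu(resp->status.status) & TX_STATUS_MSK;
+}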
+
+/**
+ * struct iwl_mvm_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @sta_addr_lo32: lower 32 bits of the MAC address
+ * @sta_addr_hi16: upper 16 bits of the MAC address
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @tid: tid of the session
+ * @seq_ctl:
+ * @bitmap: the bitmap of the BA notification as seen in the air
+ * @scd_flow: the tx queue this BA relates to
+ * @scd_ssn: the index of the last contiguously sent packet
+ * @txed: number of Txed frames in this batch
+ * @txed_2_done: number of Acked frames in this batch
+ */
+struct iwl_mvm_ba_notif {
+ __le32 sta_addr_lo32;
+ __le16 sta_addr_hi16;
+ __le16 reserved;
+
+ u8 sta_id;
+ u8 tid;
+ __le16 seq_ctl;
+ __le64 bitmap;
+ __le16 scd_flow;
+ __le16 scd_ssn;
+ u8 txed;
+ u8 txed_2_done;
+ __le16 reserved1;
+} __packed;
+
+/*
+ * struct iwl_mac_beacon_cmd - beacon template command
+ * @tx: the tx commands associated with the beacon frame
+ * @template_id: currently equal to the mac context id of the corresponding
+ * mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @frame: the template of the beacon frame
+ */
+struct iwl_mac_beacon_cmd {
+ struct iwl_tx_cmd tx;
+ __le32 template_id;
+ __le32 tim_idx;
+ __le32 tim_size;
+ struct ieee80211_hdr frame[0];
+} __packed;
+
+/**
+ * enum iwl_dump_control - dump (flush) control flags
+ * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
+ * and the TFD queues are empty.
+ */
+enum iwl_dump_control {
+ DUMP_TX_FIFO_FLUSH = BIT(1),
+};
+
+/**
+ * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
+ * @queues_ctl: bitmap of queues to flush
+ * @flush_ctl: control flags
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd {
+ __le32 queues_ctl;
+ __le16 flush_ctl;
+ __le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
+
+/**
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)&tx_resp->status +
+ tx_resp->frame_count) & 0xfff;
+}
+
+#endif /* __fw_api_tx_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
new file mode 100644
index 00000000000..23eebda848b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -0,0 +1,952 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_h__
+#define __fw_api_h__
+
+#include "fw-api-rs.h"
+#include "fw-api-tx.h"
+#include "fw-api-sta.h"
+#include "fw-api-mac.h"
+#include "fw-api-power.h"
+#include "fw-api-d3.h"
+
+/* queue and FIFO numbers by usage */
+enum {
+ IWL_MVM_OFFCHANNEL_QUEUE = 8,
+ IWL_MVM_CMD_QUEUE = 9,
+ IWL_MVM_AUX_QUEUE = 15,
+ IWL_MVM_FIRST_AGG_QUEUE = 16,
+ IWL_MVM_NUM_QUEUES = 20,
+ IWL_MVM_LAST_AGG_QUEUE = IWL_MVM_NUM_QUEUES - 1,
+ IWL_MVM_CMD_FIFO = 7
+};
+
+#define IWL_MVM_STATION_COUNT 16
+
+/* commands */
+enum {
+ MVM_ALIVE = 0x1,
+ REPLY_ERROR = 0x2,
+
+ INIT_COMPLETE_NOTIF = 0x4,
+
+ /* PHY context commands */
+ PHY_CONTEXT_CMD = 0x8,
+ DBG_CFG = 0x9,
+
+ /* station table */
+ ADD_STA = 0x18,
+ REMOVE_STA = 0x19,
+
+ /* TX */
+ TX_CMD = 0x1c,
+ TXPATH_FLUSH = 0x1e,
+ MGMT_MCAST_KEY = 0x1f,
+
+ /* global key */
+ WEP_KEY = 0x20,
+
+ /* MAC and Binding commands */
+ MAC_CONTEXT_CMD = 0x28,
+ TIME_EVENT_CMD = 0x29, /* both CMD and response */
+ TIME_EVENT_NOTIFICATION = 0x2a,
+ BINDING_CONTEXT_CMD = 0x2b,
+ TIME_QUOTA_CMD = 0x2c,
+
+ LQ_CMD = 0x4e,
+
+ /* Calibration */
+ TEMPERATURE_NOTIFICATION = 0x62,
+ CALIBRATION_CFG_CMD = 0x65,
+ CALIBRATION_RES_NOTIFICATION = 0x66,
+ CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
+ RADIO_VERSION_NOTIFICATION = 0x68,
+
+ /* Scan offload */
+ SCAN_OFFLOAD_REQUEST_CMD = 0x51,
+ SCAN_OFFLOAD_ABORT_CMD = 0x52,
+ SCAN_OFFLOAD_COMPLETE = 0x6D,
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
+ SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
+
+ /* Phy */
+ PHY_CONFIGURATION_CMD = 0x6a,
+ CALIB_RES_NOTIF_PHY_DB = 0x6b,
+ /* PHY_DB_CMD = 0x6c, */
+
+ /* Power */
+ POWER_TABLE_CMD = 0x77,
+
+ /* Scanning */
+ SCAN_REQUEST_CMD = 0x80,
+ SCAN_ABORT_CMD = 0x81,
+ SCAN_START_NOTIFICATION = 0x82,
+ SCAN_RESULTS_NOTIFICATION = 0x83,
+ SCAN_COMPLETE_NOTIFICATION = 0x84,
+
+ /* NVM */
+ NVM_ACCESS_CMD = 0x88,
+
+ SET_CALIB_DEFAULT_CMD = 0x8e,
+
+ BEACON_TEMPLATE_CMD = 0x91,
+ TX_ANT_CONFIGURATION_CMD = 0x98,
+ STATISTICS_NOTIFICATION = 0x9d,
+
+ /* RF-KILL commands and notifications */
+ CARD_STATE_CMD = 0xa0,
+ CARD_STATE_NOTIFICATION = 0xa1,
+
+ REPLY_RX_PHY_CMD = 0xc0,
+ REPLY_RX_MPDU_CMD = 0xc1,
+ BA_NOTIF = 0xc5,
+
+ REPLY_DEBUG_CMD = 0xf0,
+ DEBUG_LOG_MSG = 0xf7,
+
+ /* D3 commands/notifications */
+ D3_CONFIG_CMD = 0xd3,
+ PROT_OFFLOAD_CONFIG_CMD = 0xd4,
+ OFFLOADS_QUERY_CMD = 0xd5,
+ REMOTE_WAKE_CONFIG_CMD = 0xd6,
+
+ /* for WoWLAN in particular */
+ WOWLAN_PATTERNS = 0xe0,
+ WOWLAN_CONFIGURATION = 0xe1,
+ WOWLAN_TSC_RSC_PARAM = 0xe2,
+ WOWLAN_TKIP_PARAM = 0xe3,
+ WOWLAN_KEK_KCK_MATERIAL = 0xe4,
+ WOWLAN_GET_STATUSES = 0xe5,
+ WOWLAN_TX_POWER_PER_DB = 0xe6,
+
+ /* and for NetDetect */
+ NET_DETECT_CONFIG_CMD = 0x54,
+ NET_DETECT_PROFILES_QUERY_CMD = 0x56,
+ NET_DETECT_PROFILES_CMD = 0x57,
+ NET_DETECT_HOTSPOTS_CMD = 0x58,
+ NET_DETECT_HOTSPOTS_QUERY_CMD = 0x59,
+
+ REPLY_MAX = 0xff,
+};
+
+/**
+ * struct iwl_cmd_response - generic response struct for most commands
+ * @status: status of the requested command; its meaning varies per command
+ */
+struct iwl_cmd_response {
+ __le32 status;
+};
+
+/*
+ * struct iwl_tx_ant_cfg_cmd
+ * @valid: valid antenna configuration
+ */
+struct iwl_tx_ant_cfg_cmd {
+ __le32 valid;
+} __packed;
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwl_calib_ctrl {
+ __le32 flow_trigger;
+ __le32 event_trigger;
+} __packed;
+
+/* This enum defines the bitmap of various calibrations to enable in both
+ * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
+ */
+enum iwl_calib_cfg {
+ IWL_CALIB_CFG_XTAL_IDX = BIT(0),
+ IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1),
+ IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2),
+ IWL_CALIB_CFG_PAPD_IDX = BIT(3),
+ IWL_CALIB_CFG_TX_PWR_IDX = BIT(4),
+ IWL_CALIB_CFG_DC_IDX = BIT(5),
+ IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6),
+ IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7),
+ IWL_CALIB_CFG_TX_IQ_IDX = BIT(8),
+ IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9),
+ IWL_CALIB_CFG_RX_IQ_IDX = BIT(10),
+ IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11),
+ IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12),
+ IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13),
+ IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14),
+ IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15),
+ IWL_CALIB_CFG_DAC_IDX = BIT(16),
+ IWL_CALIB_CFG_ABS_IDX = BIT(17),
+ IWL_CALIB_CFG_AGC_IDX = BIT(18),
+};
+
+/*
+ * Phy configuration command.
+ */
+struct iwl_phy_cfg_cmd {
+ __le32 phy_cfg;
+ struct iwl_calib_ctrl calib_control;
+} __packed;
+
+#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1))
+#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3))
+#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5))
+#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7))
+#define PHY_CFG_TX_CHAIN_A BIT(8)
+#define PHY_CFG_TX_CHAIN_B BIT(9)
+#define PHY_CFG_TX_CHAIN_C BIT(10)
+#define PHY_CFG_RX_CHAIN_A BIT(12)
+#define PHY_CFG_RX_CHAIN_B BIT(13)
+#define PHY_CFG_RX_CHAIN_C BIT(14)
+
+
+/* Target of the NVM_ACCESS_CMD */
+enum {
+ NVM_ACCESS_TARGET_CACHE = 0,
+ NVM_ACCESS_TARGET_OTP = 1,
+ NVM_ACCESS_TARGET_EEPROM = 2,
+};
+
+/**
+ * struct iwl_nvm_access_cmd_ver1 - Request the device to send the NVM.
+ * @op_code: 0 - read, 1 - write.
+ * @target: NVM_ACCESS_TARGET_*. Should be 0 for read.
+ * @cache_refresh: 0 - none, 1 - NVM.
+ * @offset: offset in the nvm data.
+ * @length: of the chunk.
+ * @data: empty on read, the NVM chunk on write
+ */
+struct iwl_nvm_access_cmd_ver1 {
+ u8 op_code;
+ u8 target;
+ u8 cache_refresh;
+ u8 reserved;
+ __le16 offset;
+ __le16 length;
+ u8 data[];
+} __packed; /* NVM_ACCESS_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_nvm_access_resp_ver1 - response to NVM_ACCESS_CMD
+ * @offset: the offset in the nvm data
+ * @length: of the chunk
+ * @data: the NVM chunk when NVM_ACCESS_CMD was used to read, nothing on write
+ */
+struct iwl_nvm_access_resp_ver1 {
+ __le16 offset;
+ __le16 length;
+ u8 data[];
+} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_1 */
+
+/* Section types for NVM_ACCESS_CMD version 2 */
+enum {
+ NVM_SECTION_TYPE_HW = 0,
+ NVM_SECTION_TYPE_SW,
+ NVM_SECTION_TYPE_PAPD,
+ NVM_SECTION_TYPE_BT,
+ NVM_SECTION_TYPE_CALIBRATION,
+ NVM_SECTION_TYPE_PRODUCTION,
+ NVM_SECTION_TYPE_POST_FCS_CALIB,
+ NVM_NUM_OF_SECTIONS,
+};
+
+/**
+ * struct iwl_nvm_access_cmd_ver2 - Request the device to send an NVM section
+ * @op_code: 0 - read, 1 - write
+ * @target: NVM_ACCESS_TARGET_*
+ * @type: NVM_SECTION_TYPE_*
+ * @offset: offset in bytes into the section
+ * @length: in bytes, to read/write
+ * @data: if write operation, the data to write. On read it is empty
+ */
+struct iwl_nvm_access_cmd_ver2 {
+ u8 op_code;
+ u8 target;
+ __le16 type;
+ __le16 offset;
+ __le16 length;
+ u8 data[];
+} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
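+
+/*
+ * Hypothetical helper (an illustrative sketch, not part of the firmware
+ * API): set up a version 2 read request for @length bytes at @offset of
+ * one NVM section.
+ */
+static inline void
+iwl_nvm_access_example_read(struct iwl_nvm_access_cmd_ver2 *cmd,
+			    u16 section, u16 offset, u16 length)
+{
+	cmd->op_code = 0;			/* 0 - read, 1 - write */
+	cmd->target = NVM_ACCESS_TARGET_CACHE;	/* 0 for reads */
+	cmd->type = cpu_to_le16(section);	/* NVM_SECTION_TYPE_* */
+	cmd->offset = cpu_to_le16(offset);
+	cmd->length = cpu_to_le16(length);
+}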
+
+/**
+ * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
+ * @offset: offset in bytes into the section
+ * @length: in bytes, either how much was written or read
+ * @type: NVM_SECTION_TYPE_*
+ * @status: 0 for success, fail otherwise
+ * @data: if read operation, the data returned. Empty on write.
+ */
+struct iwl_nvm_access_resp_ver2 {
+ __le16 offset;
+ __le16 length;
+ __le16 type;
+ __le16 status;
+ u8 data[];
+} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */
+
+/* MVM_ALIVE 0x1 */
+
+/* alive response is_valid values */
+#define ALIVE_RESP_UCODE_OK BIT(0)
+#define ALIVE_RESP_RFKILL BIT(1)
+
+/* alive response ver_type values */
+enum {
+ FW_TYPE_HW = 0,
+ FW_TYPE_PROT = 1,
+ FW_TYPE_AP = 2,
+ FW_TYPE_WOWLAN = 3,
+ FW_TYPE_TIMING = 4,
+ FW_TYPE_WIPAN = 5
+};
+
+/* alive response ver_subtype values */
+enum {
+ FW_SUBTYPE_FULL_FEATURE = 0,
+ FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
+ FW_SUBTYPE_REDUCED = 2,
+ FW_SUBTYPE_ALIVE_ONLY = 3,
+ FW_SUBTYPE_WOWLAN = 4,
+ FW_SUBTYPE_AP_SUBTYPE = 5,
+ FW_SUBTYPE_WIPAN = 6,
+ FW_SUBTYPE_INITIALIZE = 9
+};
+
+#define IWL_ALIVE_STATUS_ERR 0xDEAD
+#define IWL_ALIVE_STATUS_OK 0xCAFE
+
+#define IWL_ALIVE_FLG_RFKILL BIT(0)
+
+struct mvm_alive_resp {
+ __le16 status;
+ __le16 flags;
+ u8 ucode_minor;
+ u8 ucode_major;
+ __le16 id;
+ u8 api_minor;
+ u8 api_major;
+ u8 ver_subtype;
+ u8 ver_type;
+ u8 mac;
+ u8 opt;
+ __le16 reserved2;
+ __le32 timestamp;
+ __le32 error_event_table_ptr; /* SRAM address for error log */
+ __le32 log_event_table_ptr; /* SRAM address for event log */
+ __le32 cpu_register_ptr;
+ __le32 dbgm_config_ptr;
+ __le32 alive_counter_ptr;
+ __le32 scd_base_ptr; /* SRAM address for SCD */
+} __packed; /* ALIVE_RES_API_S_VER_1 */
+
+/* Error response/notification */
+enum {
+ FW_ERR_UNKNOWN_CMD = 0x0,
+ FW_ERR_INVALID_CMD_PARAM = 0x1,
+ FW_ERR_SERVICE = 0x2,
+ FW_ERR_ARC_MEMORY = 0x3,
+ FW_ERR_ARC_CODE = 0x4,
+ FW_ERR_WATCH_DOG = 0x5,
+ FW_ERR_WEP_GRP_KEY_INDX = 0x10,
+ FW_ERR_WEP_KEY_SIZE = 0x11,
+ FW_ERR_OBSOLETE_FUNC = 0x12,
+ FW_ERR_UNEXPECTED = 0xFE,
+ FW_ERR_FATAL = 0xFF
+};
+
+/**
+ * struct iwl_error_resp - FW error indication
+ * ( REPLY_ERROR = 0x2 )
+ * @error_type: one of FW_ERR_*
+ * @cmd_id: the command ID for which the error occurred
+ * @bad_cmd_seq_num: sequence number of the erroneous command
+ * @error_service: which service created the error, applicable only if
+ * error_type = 2, otherwise 0
+ * @timestamp: TSF in usecs.
+ */
+struct iwl_error_resp {
+ __le32 error_type;
+ u8 cmd_id;
+ u8 reserved1;
+ __le16 bad_cmd_seq_num;
+ __le32 error_service;
+ __le64 timestamp;
+} __packed;
+
+
+/* Common PHY, MAC and Bindings definitions */
+
+#define MAX_MACS_IN_BINDING (3)
+#define MAX_BINDINGS (4)
+#define AUX_BINDING_INDEX (3)
+#define MAX_PHYS (4)
+
+/* Used to extract ID and color from the context dword */
+#define FW_CTXT_ID_POS (0)
+#define FW_CTXT_ID_MSK (0xff << FW_CTXT_ID_POS)
+#define FW_CTXT_COLOR_POS (8)
+#define FW_CTXT_COLOR_MSK (0xff << FW_CTXT_COLOR_POS)
+#define FW_CTXT_INVALID (0xffffffff)
+
+#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\
+ (_color << FW_CTXT_COLOR_POS))
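+
+/*
+ * Hypothetical helpers (illustrative sketches, not part of the firmware
+ * API): extract the ID and color back out of a context dword that was
+ * built with FW_CMD_ID_AND_COLOR().
+ */
+static inline u32 iwl_mvm_ctxt_id_example(__le32 id_and_color)
+{
+	return (le32_to_cpu(id_and_color) & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+}
+
+static inline u32 iwl_mvm_ctxt_color_example(__le32 id_and_color)
+{
+	return (le32_to_cpu(id_and_color) & FW_CTXT_COLOR_MSK) >>
+		FW_CTXT_COLOR_POS;
+}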
+
+/* Possible actions on PHYs, MACs and Bindings */
+enum {
+ FW_CTXT_ACTION_STUB = 0,
+ FW_CTXT_ACTION_ADD,
+ FW_CTXT_ACTION_MODIFY,
+ FW_CTXT_ACTION_REMOVE,
+ FW_CTXT_ACTION_NUM
+}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
+
+/* Time Events */
+
+/* Time Event types, according to MAC type */
+enum iwl_time_event_type {
+ /* BSS Station Events */
+ TE_BSS_STA_AGGRESSIVE_ASSOC,
+ TE_BSS_STA_ASSOC,
+ TE_BSS_EAP_DHCP_PROT,
+ TE_BSS_QUIET_PERIOD,
+
+ /* P2P Device Events */
+ TE_P2P_DEVICE_DISCOVERABLE,
+ TE_P2P_DEVICE_LISTEN,
+ TE_P2P_DEVICE_ACTION_SCAN,
+ TE_P2P_DEVICE_FULL_SCAN,
+
+ /* P2P Client Events */
+ TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
+ TE_P2P_CLIENT_ASSOC,
+ TE_P2P_CLIENT_QUIET_PERIOD,
+
+ /* P2P GO Events */
+ TE_P2P_GO_ASSOC_PROT,
+ TE_P2P_GO_REPETITIVE_NOA,
+ TE_P2P_GO_CT_WINDOW,
+
+ /* WiDi Sync Events */
+ TE_WIDI_TX_SYNC,
+
+ TE_MAX
+}; /* MAC_EVENT_TYPE_API_E_VER_1 */
+
+/* Time Event dependencies: none, on another TE, or in a specific time */
+enum {
+ TE_INDEPENDENT = 0,
+ TE_DEP_OTHER = 1,
+ TE_DEP_TSF = 2,
+ TE_EVENT_SOCIOPATHIC = 4,
+}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
+
+/* When to send Time Event notifications and to whom (internal = FW) */
+enum {
+ TE_NOTIF_NONE = 0,
+ TE_NOTIF_HOST_START = 0x1,
+ TE_NOTIF_HOST_END = 0x2,
+ TE_NOTIF_INTERNAL_START = 0x4,
+ TE_NOTIF_INTERNAL_END = 0x8
+}; /* MAC_EVENT_ACTION_API_E_VER_1 */
+
+/*
+ * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * the first fragment is scheduled.
+ * @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * the first 2 fragments are scheduled.
+ * @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number
+ * of fragments are valid.
+ *
+ * Other than the constants defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' fragments will
+ * be scheduled.
+ */
+enum {
+ TE_FRAG_NONE = 0,
+ TE_FRAG_SINGLE = 1,
+ TE_FRAG_DUAL = 2,
+ TE_FRAG_ENDLESS = 0xffffffff
+};
+
+/* Repeat the time event endlessly (until removed) */
+#define TE_REPEAT_ENDLESS (0xffffffff)
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_REPEAT_MAX_MSK (0x0fffffff)
+/* If a Time Event can be fragmented, this is the max number of fragments */
+#define TE_FRAG_MAX_MSK (0x0fffffff)
+
+/**
+ * struct iwl_time_event_cmd - configuring Time Events
+ * ( TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @id: this field has two meanings, depending on the action:
+ * If the action is ADD, then it means the type of event to add.
+ * For all other actions it is the unique event ID assigned when the
+ * event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @interval_reciprocal: 2^32 / interval
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
+ * @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
+ * @is_present: 0 or 1, are we present or absent during the Time Event
+ * @max_frags: maximal number of fragments the Time Event can be divided to
+ * @notify: notifications using TE_NOTIF_* (whom to notify when)
+ */
+struct iwl_time_event_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ __le32 id;
+ /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
+ __le32 apply_time;
+ __le32 max_delay;
+ __le32 dep_policy;
+ __le32 depends_on;
+ __le32 is_present;
+ __le32 max_frags;
+ __le32 interval;
+ __le32 interval_reciprocal;
+ __le32 duration;
+ __le32 repeat;
+ __le32 notify;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
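+
+/*
+ * Example (a hypothetical sketch, not part of the firmware API): one
+ * plausible way to fill a simple one-shot, independent time event of
+ * @duration TU for the MAC identified by @mac_id/@color. The max_delay,
+ * interval and notification choices below are illustrative only.
+ */
+static inline void
+iwl_time_event_cmd_example(struct iwl_time_event_cmd *cmd, u32 mac_id,
+			   u32 color, u32 type, u32 duration)
+{
+	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color));
+	cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+	cmd->id = cpu_to_le32(type);		/* e.g. TE_BSS_STA_ASSOC */
+	cmd->apply_time = cpu_to_le32(0);	/* start as soon as possible */
+	cmd->max_delay = cpu_to_le32(500);
+	cmd->dep_policy = cpu_to_le32(TE_INDEPENDENT);
+	cmd->depends_on = cpu_to_le32(0);
+	cmd->is_present = cpu_to_le32(1);
+	cmd->max_frags = cpu_to_le32(TE_FRAG_NONE);
+	cmd->interval = cpu_to_le32(1);
+	cmd->interval_reciprocal = cpu_to_le32(0xffffffff); /* ~2^32 / 1 */
+	cmd->duration = cpu_to_le32(duration);
+	cmd->repeat = cpu_to_le32(1);
+	cmd->notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END);
+}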
+
+/**
+ * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
+ * @status: bit 0 indicates success, all others specify errors
+ * @id: the Time Event type
+ * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
+ * @id_and_color: ID and color of the relevant MAC
+ */
+struct iwl_time_event_resp {
+ __le32 status;
+ __le32 id;
+ __le32 unique_id;
+ __le32 id_and_color;
+} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
+
+/**
+ * struct iwl_time_event_notif - notifications of time event start/stop
+ * ( TIME_EVENT_NOTIFICATION = 0x2a )
+ * @timestamp: action timestamp in GP2
+ * @session_id: session's unique id
+ * @unique_id: unique id of the Time Event itself
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: one of the TE_NOTIF_* values
+ * @status: true if scheduled, false otherwise (not executed)
+ */
+struct iwl_time_event_notif {
+ __le32 timestamp;
+ __le32 session_id;
+ __le32 unique_id;
+ __le32 id_and_color;
+ __le32 action;
+ __le32 status;
+} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
+
+
+/* Bindings and Time Quota */
+
+/**
+ * struct iwl_binding_cmd - configuring bindings
+ * ( BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding
+ * @phy: PHY id and color which belongs to the binding
+ */
+struct iwl_binding_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* BINDING_DATA_API_S_VER_1 */
+ __le32 macs[MAX_MACS_IN_BINDING];
+ __le32 phy;
+} __packed; /* BINDING_CMD_API_S_VER_1 */
+
+/* The maximal number of fragments in the FW's schedule session */
+#define IWL_MVM_MAX_QUOTA 128
+
+/**
+ * struct iwl_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ * remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ */
+struct iwl_time_quota_data {
+ __le32 id_and_color;
+ __le32 quota;
+ __le32 max_duration;
+} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_time_quota_cmd - configuration of time quota between bindings
+ * ( TIME_QUOTA_CMD = 0x2c )
+ * @quotas: allocations per binding
+ */
+struct iwl_time_quota_cmd {
+ struct iwl_time_quota_data quotas[MAX_BINDINGS];
+} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+
+/* PHY context */
+
+/* Supported bands */
+#define PHY_BAND_5 (0)
+#define PHY_BAND_24 (1)
+
+/* Supported channel width, vary if there is VHT support */
+#define PHY_VHT_CHANNEL_MODE20 (0x0)
+#define PHY_VHT_CHANNEL_MODE40 (0x1)
+#define PHY_VHT_CHANNEL_MODE80 (0x2)
+#define PHY_VHT_CHANNEL_MODE160 (0x3)
+
+/*
+ * Control channel position:
+ * For legacy set bit means upper channel, otherwise lower.
+ * For VHT - bit-2 marks if the control is lower/upper relative to center-freq
+ * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
+ * center_freq
+ * |
+ * 40Mhz |_______|_______|
+ * 80Mhz |_______|_______|_______|_______|
+ * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
+ * code 011 010 001 000 | 100 101 110 111
+ */
+#define PHY_VHT_CTRL_POS_1_BELOW (0x0)
+#define PHY_VHT_CTRL_POS_2_BELOW (0x1)
+#define PHY_VHT_CTRL_POS_3_BELOW (0x2)
+#define PHY_VHT_CTRL_POS_4_BELOW (0x3)
+#define PHY_VHT_CTRL_POS_1_ABOVE (0x4)
+#define PHY_VHT_CTRL_POS_2_ABOVE (0x5)
+#define PHY_VHT_CTRL_POS_3_ABOVE (0x6)
+#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
+
+/*
+ * @band: PHY_BAND_*
+ * @channel: channel number
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
+ */
+struct iwl_fw_channel_info {
+ u8 band;
+ u8 channel;
+ u8 width;
+ u8 ctrl_pos;
+} __packed;
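+
+/*
+ * Example (a hypothetical sketch, not part of the firmware API): a 20 MHz
+ * channel on the 2.4 GHz band would be described with PHY_BAND_24,
+ * PHY_VHT_CHANNEL_MODE20 and a control position offset of 0.
+ */
+static inline void iwl_fw_channel_info_example(struct iwl_fw_channel_info *ci,
+					       u8 chan)
+{
+	ci->band = PHY_BAND_24;
+	ci->channel = chan;
+	ci->width = PHY_VHT_CHANNEL_MODE20;
+	ci->ctrl_pos = PHY_VHT_CTRL_POS_1_BELOW;
+}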
+
+#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
+#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
+ (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS)
+#define PHY_RX_CHAIN_VALID_POS (1)
+#define PHY_RX_CHAIN_VALID_MSK \
+ (0x7 << PHY_RX_CHAIN_VALID_POS)
+#define PHY_RX_CHAIN_FORCE_SEL_POS (4)
+#define PHY_RX_CHAIN_FORCE_SEL_MSK \
+ (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS)
+#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
+#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
+ (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
+#define PHY_RX_CHAIN_CNT_POS (10)
+#define PHY_RX_CHAIN_CNT_MSK \
+ (0x3 << PHY_RX_CHAIN_CNT_POS)
+#define PHY_RX_CHAIN_MIMO_CNT_POS (12)
+#define PHY_RX_CHAIN_MIMO_CNT_MSK \
+ (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS)
+#define PHY_RX_CHAIN_MIMO_FORCE_POS (14)
+#define PHY_RX_CHAIN_MIMO_FORCE_MSK \
+ (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS)
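+
+/*
+ * Hypothetical helper (an illustrative sketch, not part of the firmware
+ * API): compose the rxchain_info word of the PHY context from a
+ * valid-antenna bitmap and the number of chains to use when idle and when
+ * active, following the field layout above.
+ */
+static inline __le32 iwl_phy_rx_chain_example(u8 valid_rx_ant, u8 idle_cnt,
+					      u8 active_cnt)
+{
+	u32 rx_chain = valid_rx_ant << PHY_RX_CHAIN_VALID_POS;
+
+	rx_chain |= idle_cnt << PHY_RX_CHAIN_CNT_POS;
+	rx_chain |= active_cnt << PHY_RX_CHAIN_MIMO_CNT_POS;
+
+	return cpu_to_le32(rx_chain);
+}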
+
+/* TODO: fix the value, make it depend on firmware at runtime? */
+#define NUM_PHY_CTX 3
+
+/* TODO: complete missing documentation */
+/**
+ * struct iwl_phy_context_cmd - config of the PHY context
+ * ( PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @apply_time: 0 means immediate apply and context switch.
+ * any other value means the new parameters are applied after that many usecs
+ * @tx_param_color: ???
+ * @channel_info:
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+struct iwl_phy_context_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* PHY_CONTEXT_DATA_API_S_VER_1 */
+ __le32 apply_time;
+ __le32 tx_param_color;
+ struct iwl_fw_channel_info ci;
+ __le32 txchain_info;
+ __le32 rxchain_info;
+ __le32 acquisition_data;
+ __le32 dsp_cfg_flags;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
+
+#define IWL_RX_INFO_PHY_CNT 8
+#define IWL_RX_INFO_AGC_IDX 1
+#define IWL_RX_INFO_RSSI_AB_IDX 2
+#define IWL_RX_INFO_RSSI_C_IDX 3
+#define IWL_OFDM_AGC_DB_MSK 0xfe00
+#define IWL_OFDM_AGC_DB_POS 9
+#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
+#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
+#define IWL_OFDM_RSSI_A_POS 0
+#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
+#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
+#define IWL_OFDM_RSSI_B_POS 16
+#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff
+#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00
+#define IWL_OFDM_RSSI_C_POS 0
+
+/**
+ * struct iwl_rx_phy_info - phy info
+ * (REPLY_RX_PHY_CMD = 0xc0)
+ * @non_cfg_phy_cnt: non configurable DSP phy data byte count
+ * @cfg_phy_cnt: configurable DSP phy data byte count
+ * @stat_id: configurable DSP phy data set ID
+ * @reserved1:
+ * @system_timestamp: GP2 at on air rise
+ * @timestamp: TSF at on air rise
+ * @beacon_time_stamp: beacon at on-air rise
+ * @phy_flags: general phy flags: band, modulation, ...
+ * @channel: channel number
+ * @non_cfg_phy_buf: for various implementations of non_cfg_phy
+ * @rate_n_flags: RATE_MCS_*
+ * @byte_count: frame's byte-count
+ * @frame_time: frame's time on the air, based on byte count and frame rate
+ * calculation
+ *
+ * Before each Rx, the device sends this data. It contains PHY information
+ * about the reception of the packet.
+ */
+struct iwl_rx_phy_info {
+ u8 non_cfg_phy_cnt;
+ u8 cfg_phy_cnt;
+ u8 stat_id;
+ u8 reserved1;
+ __le32 system_timestamp;
+ __le64 timestamp;
+ __le32 beacon_time_stamp;
+ __le16 phy_flags;
+ __le16 channel;
+ __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
+ __le32 rate_n_flags;
+ __le32 byte_count;
+ __le16 reserved2;
+ __le16 frame_time;
+} __packed;
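+
+/*
+ * Hypothetical helper (an illustrative sketch, not part of the firmware
+ * API): extract the chain A in-band energy value from the non_cfg_phy
+ * words using the index/mask definitions above; chains B and C use the
+ * corresponding B/C masks (chain C lives in the IWL_RX_INFO_RSSI_C_IDX
+ * word). Conversion to dBm is done elsewhere.
+ */
+static inline u8 iwl_rx_rssi_a_example(struct iwl_rx_phy_info *phy_info)
+{
+	u32 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
+
+	return (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
+}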
+
+struct iwl_rx_mpdu_res_start {
+ __le16 byte_count;
+ __le16 reserved;
+} __packed;
+
+/**
+ * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
+ * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
+ * @RX_RES_PHY_FLAGS_MOD_CCK:
+ * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
+ * @RX_RES_PHY_FLAGS_NARROW_BAND:
+ * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
+ * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
+ * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
+ * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
+ * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
+ */
+enum iwl_rx_phy_flags {
+ RX_RES_PHY_FLAGS_BAND_24 = BIT(0),
+ RX_RES_PHY_FLAGS_MOD_CCK = BIT(1),
+ RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2),
+ RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3),
+ RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4),
+ RX_RES_PHY_FLAGS_ANTENNA_POS = 4,
+ RX_RES_PHY_FLAGS_AGG = BIT(7),
+ RX_RES_PHY_FLAGS_OFDM_HT = BIT(8),
+ RX_RES_PHY_FLAGS_OFDM_GF = BIT(9),
+ RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10),
+};
+
+/**
+ * enum iwl_mvm_rx_status - written by fw for each Rx packet
+ * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
+ * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
+ * @RX_MPDU_RES_STATUS_SRC_STA_FOUND:
+ * @RX_MPDU_RES_STATUS_KEY_VALID:
+ * @RX_MPDU_RES_STATUS_KEY_PARAM_OK:
+ * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
+ * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
+ * in the driver.
+ * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
+ * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
+ * alg = CCM only. Checks replay attack for 11w frames. Relevant only if
+ * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
+ * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
+ * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
+ * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
+ * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
+ * @RX_MPDU_RES_STATUS_STA_ID_MSK:
+ * @RX_MPDU_RES_STATUS_RRF_KILL:
+ * @RX_MPDU_RES_STATUS_FILTERING_MSK:
+ * @RX_MPDU_RES_STATUS2_FILTERING_MSK:
+ */
+enum iwl_mvm_rx_status {
+ RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
+ RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
+ RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
+ RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
+ RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4),
+ RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
+ RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
+ RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
+ RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7),
+ RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
+ RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
+ RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
+ RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
+ RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
+ RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
+ RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
+ RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
+ RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP = BIT(12),
+ RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
+ RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
+ RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
+ RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
+ RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
+ RX_MPDU_RES_STATUS_RRF_KILL = BIT(29),
+ RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
+ RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
+};
+
+/**
+ * struct iwl_radio_version_notif - information on the radio version
+ * ( RADIO_VERSION_NOTIFICATION = 0x68 )
+ * @radio_flavor:
+ * @radio_step:
+ * @radio_dash:
+ */
+struct iwl_radio_version_notif {
+ __le32 radio_flavor;
+ __le32 radio_step;
+ __le32 radio_dash;
+} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */
+
+enum iwl_card_state_flags {
+ CARD_ENABLED = 0x00,
+ HW_CARD_DISABLED = 0x01,
+ SW_CARD_DISABLED = 0x02,
+ CT_KILL_CARD_DISABLED = 0x04,
+ HALT_CARD_DISABLED = 0x08,
+ CARD_DISABLED_MSK = 0x0f,
+ CARD_IS_RX_ON = 0x10,
+};
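+
+/*
+ * Hypothetical helper (an illustrative sketch, not part of the firmware
+ * API): true if a card state notification reports any "disabled" reason.
+ */
+static inline bool iwl_mvm_card_disabled_example(__le32 flags)
+{
+	return le32_to_cpu(flags) & CARD_DISABLED_MSK;
+}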
+
+/**
+ * struct iwl_card_state_notif - card state notification
+ * ( CARD_STATE_NOTIFICATION = 0xa1 )
+ * @flags: %iwl_card_state_flags
+ */
+struct iwl_card_state_notif {
+ __le32 flags;
+} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_set_calib_default_cmd - set default value for calibration.
+ * ( SET_CALIB_DEFAULT_CMD = 0x8e )
+ * @calib_index: the calibration to set value for
+ * @length: of data
+ * @data: the value to set for the calibration result
+ */
+struct iwl_set_calib_default_cmd {
+ __le16 calib_index;
+ __le16 length;
+ u8 data[0];
+} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
+
+#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
new file mode 100644
index 00000000000..d3d959db03a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -0,0 +1,640 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "iwl-fw.h"
+#include "iwl-debug.h"
+#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-eeprom-parse.h"
+
+#include "mvm.h"
+#include "iwl-phy-db.h"
+
+#define MVM_UCODE_ALIVE_TIMEOUT HZ
+#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
+
+#define UCODE_VALID_OK cpu_to_le32(0x1)
+
+/* Default calibration values for WkP - set to INIT image w/o running */
+static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
+ 0x00, 0x18, 0x00 };
+static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f };
+static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
+static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
+ 0x00 };
+static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
+static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
+static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
+static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };
+
+struct iwl_calib_default_data {
+ u16 size;
+ void *data;
+};
+
+#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
+
+static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
+ [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
+ [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
+ [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
+ [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
+ [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
+ [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
+ [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
+};
+
+struct iwl_mvm_alive_data {
+ bool valid;
+ u32 scd_base_addr;
+};
+
+static inline const struct fw_img *
+iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
+{
+ if (ucode_type >= IWL_UCODE_TYPE_MAX)
+ return NULL;
+
+ return &mvm->fw->img[ucode_type];
+}
+
+static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
+{
+ struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
+ .valid = cpu_to_le32(valid_tx_ant),
+ };
+
+ IWL_DEBUG_HC(mvm, "select valid tx ant: %u\n", valid_tx_ant);
+ return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC,
+ sizeof(tx_ant_cmd), &tx_ant_cmd);
+}
+
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_alive_data *alive_data = data;
+ struct mvm_alive_resp *palive;
+
+ palive = (void *)pkt->data;
+
+ mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr);
+ mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+
+ alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK;
+ IWL_DEBUG_FW(mvm, "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+ le16_to_cpu(palive->status), palive->ver_type,
+ palive->ver_subtype);
+
+ return true;
+}
+
+static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_phy_db *phy_db = data;
+
+ if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
+ WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+ return true;
+ }
+
+ WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));
+
+ return false;
+}
+
+static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
+ enum iwl_ucode_type ucode_type)
+{
+ struct iwl_notification_wait alive_wait;
+ struct iwl_mvm_alive_data alive_data;
+ const struct fw_img *fw;
+ int ret, i;
+ enum iwl_ucode_type old_type = mvm->cur_ucode;
+ static const u8 alive_cmd[] = { MVM_ALIVE };
+
+ mvm->cur_ucode = ucode_type;
+ fw = iwl_get_ucode_image(mvm, ucode_type);
+
+ mvm->ucode_loaded = false;
+
+ if (!fw)
+ return -EINVAL;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
+ alive_cmd, ARRAY_SIZE(alive_cmd),
+ iwl_alive_fn, &alive_data);
+
+ ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
+ if (ret) {
+ mvm->cur_ucode = old_type;
+ iwl_remove_notification(&mvm->notif_wait, &alive_wait);
+ return ret;
+ }
+
+ /*
+ * Some things may run in the background now, but we
+ * just wait for the ALIVE notification here.
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+ if (ret) {
+ mvm->cur_ucode = old_type;
+ return ret;
+ }
+
+ if (!alive_data.valid) {
+ IWL_ERR(mvm, "Loaded ucode is not valid!\n");
+ mvm->cur_ucode = old_type;
+ return -EIO;
+ }
+
+ iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+
+ /*
+ * Note: all the queues are enabled as part of the interface
+ * initialization, but in firmware restart scenarios they
+ * could be stopped, so wake them up. In firmware restart,
+ * mac80211 will have the queues stopped as well until the
+ * reconfiguration completes. During normal startup, they
+ * will be empty.
+ */
+
+ for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
+ if (i < IWL_MVM_FIRST_AGG_QUEUE && i != IWL_MVM_CMD_QUEUE)
+ mvm->queue_to_mac80211[i] = i;
+ else
+ mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
+ atomic_set(&mvm->queue_stop_count[i], 0);
+ }
+
+ mvm->transport_queue_stop = 0;
+
+ mvm->ucode_loaded = true;
+
+ return 0;
+}
+#define IWL_HW_REV_ID_RAINBOW 0x2
+#define IWL_PROJ_TYPE_LHP 0x5
+
+static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
+{
+ struct iwl_nvm_data *data = mvm->nvm_data;
+ /* Temp calls to static definitions, will be changed to CSR calls */
+ u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
+ u8 project_type = IWL_PROJ_TYPE_LHP;
+
+ return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
+ (hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
+ (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
+}
+
+static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
+{
+ struct iwl_phy_cfg_cmd phy_cfg_cmd;
+ enum iwl_ucode_type ucode_type = mvm->cur_ucode;
+
+ /* Set parameters */
+ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm));
+ phy_cfg_cmd.calib_control.event_trigger =
+ mvm->fw->default_calib[ucode_type].event_trigger;
+ phy_cfg_cmd.calib_control.flow_trigger =
+ mvm->fw->default_calib[ucode_type].flow_trigger;
+
+ IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
+ phy_cfg_cmd.phy_cfg);
+
+ return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, CMD_SYNC,
+ sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+}
+
+/* Starting with the new PHY DB implementation - New calibs are enabled */
+/* Value - 0x405e7 */
+#define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\
+ IWL_CALIB_CFG_TEMPERATURE_IDX |\
+ IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
+ IWL_CALIB_CFG_DC_IDX |\
+ IWL_CALIB_CFG_BB_FILTER_IDX |\
+ IWL_CALIB_CFG_LO_LEAKAGE_IDX |\
+ IWL_CALIB_CFG_TX_IQ_IDX |\
+ IWL_CALIB_CFG_RX_IQ_IDX |\
+ IWL_CALIB_CFG_AGC_IDX)
+
+#define IWL_CALIB_DEFAULT_EVENT_INIT 0x0
+
+/* Value 0x41567 */
+#define IWL_CALIB_DEFAULT_FLOW_RUN (IWL_CALIB_CFG_XTAL_IDX |\
+ IWL_CALIB_CFG_TEMPERATURE_IDX |\
+ IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
+ IWL_CALIB_CFG_BB_FILTER_IDX |\
+ IWL_CALIB_CFG_DC_IDX |\
+ IWL_CALIB_CFG_TX_IQ_IDX |\
+ IWL_CALIB_CFG_RX_IQ_IDX |\
+ IWL_CALIB_CFG_SENSITIVITY_IDX |\
+ IWL_CALIB_CFG_AGC_IDX)
+
+#define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\
+ IWL_CALIB_CFG_TEMPERATURE_IDX |\
+ IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
+ IWL_CALIB_CFG_TX_PWR_IDX |\
+ IWL_CALIB_CFG_DC_IDX |\
+ IWL_CALIB_CFG_TX_IQ_IDX |\
+ IWL_CALIB_CFG_SENSITIVITY_IDX)
+
+/*
+ * Sets the calibrations trigger values that will be sent to the FW for runtime
+ * and init calibrations.
+ * The ones given in the FW TLV are not correct.
+ */
+static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
+{
+ struct iwl_tlv_calib_ctrl default_calib;
+
+ /*
+ * WkP FW TLV calib bits are wrong, overwrite them.
+ * This defines the dynamic calibrations which are implemented in the
+ * uCode both for init(flow) calculation and event driven calibs.
+ */
+
+ /* Init Image */
+ default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
+ default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);
+
+ if (default_calib.event_trigger !=
+ mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
+ IWL_ERR(mvm,
+ "Updating the event calib for INIT image: 0x%x -> 0x%x\n",
+ mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
+ default_calib.event_trigger);
+ if (default_calib.flow_trigger !=
+ mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
+ IWL_ERR(mvm,
+ "Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
+ mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
+ default_calib.flow_trigger);
+
+ memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
+ &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
+ IWL_ERR(mvm,
+ "Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
+ default_calib.event_trigger,
+ default_calib.flow_trigger);
+
+ /* Run time image */
+ default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
+ default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);
+
+ if (default_calib.event_trigger !=
+ mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
+ IWL_ERR(mvm,
+ "Updating the event calib for RT image: 0x%x -> 0x%x\n",
+ mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
+ default_calib.event_trigger);
+ if (default_calib.flow_trigger !=
+ mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
+ IWL_ERR(mvm,
+ "Updating the flow calib for RT image: 0x%x -> 0x%x\n",
+ mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
+ default_calib.flow_trigger);
+
+ memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
+ &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
+ IWL_ERR(mvm,
+ "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
+ default_calib.event_trigger,
+ default_calib.flow_trigger);
+}
+
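+/*
+ * Send default payloads for the calibrations that are not run. Each
+ * SET_CALIB_DEFAULT_CMD is built on the stack in cmd_raw as a small
+ * header (calibration index and payload length) followed by the payload
+ * taken from wkp_calib_default_data[].
+ */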
+static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
+{
+ u8 cmd_raw[16]; /* holds the variable size commands */
+ struct iwl_set_calib_default_cmd *cmd =
+ (struct iwl_set_calib_default_cmd *)cmd_raw;
+ int ret, i;
+
+ /* Setting default values for calibrations we don't run */
+ for (i = 0; i < ARRAY_SIZE(wkp_calib_default_data); i++) {
+ u16 cmd_len;
+
+ if (wkp_calib_default_data[i].size == 0)
+ continue;
+
+ memset(cmd_raw, 0, sizeof(cmd_raw));
+ cmd_len = wkp_calib_default_data[i].size + sizeof(*cmd);
+ cmd->calib_index = cpu_to_le16(i);
+ cmd->length = cpu_to_le16(wkp_calib_default_data[i].size);
+ if (WARN_ONCE(cmd_len > sizeof(cmd_raw),
+ "Need to enlarge cmd_raw to %d\n", cmd_len))
+ break;
+ memcpy(cmd->data, wkp_calib_default_data[i].data,
+ wkp_calib_default_data[i].size);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SET_CALIB_DEFAULT_CMD, 0,
+ sizeof(*cmd) +
+ wkp_calib_default_data[i].size,
+ cmd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+ struct iwl_notification_wait calib_wait;
+ static const u8 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ CALIB_RES_NOTIF_PHY_DB
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->init_ucode_run)
+ return 0;
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &calib_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_phy_db_entry,
+ mvm->phy_db);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
+ goto error;
+ }
+
+ if (read_nvm) {
+ /* Read nvm */
+ ret = iwl_nvm_init(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ goto error;
+ }
+ }
+
+ ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+ WARN_ON(ret);
+
+ /* Override the calibrations from the TLV and the FW const data */
+ iwl_set_default_calib_trigger(mvm);
+
+ /* WkP doesn't have all calibrations, need to set default values */
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ ret = iwl_set_default_calibrations(mvm);
+ if (ret)
+ goto error;
+ }
+
+ /*
+ * Send phy configurations command to init uCode
+ * to start the 16.0 uCode init image internal calibrations.
+ */
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
+ ret);
+ goto error;
+ }
+
+ /*
+ * Some things may run in the background now, but we
+ * just wait for the calibration complete notification.
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
+ MVM_UCODE_CALIB_TIMEOUT);
+ if (!ret)
+ mvm->init_ucode_run = true;
+ goto out;
+
+error:
+ iwl_remove_notification(&mvm->notif_wait, &calib_wait);
+out:
+ if (!iwlmvm_mod_params.init_dbg) {
+ iwl_trans_stop_device(mvm->trans);
+ } else if (!mvm->nvm_data) {
+ /* we want to debug INIT and we have no NVM - fake */
+ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
+ sizeof(struct ieee80211_channel) +
+ sizeof(struct ieee80211_rate),
+ GFP_KERNEL);
+ if (!mvm->nvm_data)
+ return -ENOMEM;
+ mvm->nvm_data->valid_rx_ant = 1;
+ mvm->nvm_data->valid_tx_ant = 1;
+ mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
+ mvm->nvm_data->bands[0].n_channels = 1;
+ mvm->nvm_data->bands[0].n_bitrates = 1;
+ mvm->nvm_data->bands[0].bitrates =
+ (void *)mvm->nvm_data->channels + 1;
+ mvm->nvm_data->bands[0].bitrates->hw_value = 10;
+ }
+
+ return ret;
+}
+
+#define UCODE_CALIB_TIMEOUT (2*HZ)
+
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+ int ret, i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ /* If we were in RFKILL during module loading, load init ucode now */
+ if (!mvm->init_ucode_run) {
+ ret = iwl_run_init_mvm_ucode(mvm, false);
+ if (ret && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+ goto error;
+ }
+ }
+
+ if (iwlmvm_mod_params.init_dbg)
+ return 0;
+
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
+ if (ret)
+ goto error;
+
+ /* Send phy db control command and then phy db calibration */
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
+
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+
+ /* init the fw <-> mac80211 STA mapping */
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+
+ /* Add auxiliary station for scanning */
+ ret = iwl_mvm_add_aux_sta(mvm);
+ if (ret)
+ goto error;
+
+ IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+
+ return 0;
+ error:
+ iwl_trans_stop_device(mvm->trans);
+ return ret;
+}
+
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
+{
+ int ret, i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
+ if (ret)
+ goto error;
+
+ /* Send phy db control command and then phy db calibration */
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
+
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+
+ /* init the fw <-> mac80211 STA mapping */
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+
+ /* Add auxiliary station for scanning */
+ ret = iwl_mvm_add_aux_sta(mvm);
+ if (ret)
+ goto error;
+
+ return 0;
+ error:
+ iwl_trans_stop_device(mvm->trans);
+ return ret;
+}
+
+int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
+ u32 flags = le32_to_cpu(card_state_notif->flags);
+
+ IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_KILL_CARD_DISABLED) ?
+ "Reached" : "Not reached");
+
+ return 0;
+}
+
+int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_radio_version_notif *radio_version = (void *)pkt->data;
+
+ /* TODO: what to do with that? */
+ IWL_DEBUG_INFO(mvm,
+ "Radio version: flavor: 0x%08x, step 0x%08x, dash 0x%08x\n",
+ le32_to_cpu(radio_version->radio_flavor),
+ le32_to_cpu(radio_version->radio_step),
+ le32_to_cpu(radio_version->radio_dash));
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
new file mode 100644
index 00000000000..011906e73a0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -0,0 +1,134 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/leds.h>
+#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+
+/* Set led register on */
+static void iwl_mvm_led_enable(struct iwl_mvm *mvm)
+{
+ iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
+}
+
+/* Set led register off */
+static void iwl_mvm_led_disable(struct iwl_mvm *mvm)
+{
+ iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF);
+}
+
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);
+ if (brightness > 0)
+ iwl_mvm_led_enable(mvm);
+ else
+ iwl_mvm_led_disable(mvm);
+}
+
+int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+ int mode = iwlwifi_mod_params.led_mode;
+ int ret;
+
+ switch (mode) {
+ case IWL_LED_DEFAULT:
+ case IWL_LED_RF_STATE:
+ mode = IWL_LED_RF_STATE;
+ break;
+ case IWL_LED_DISABLE:
+ IWL_INFO(mvm, "Led disabled\n");
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(mvm->hw->wiphy));
+ mvm->led.brightness_set = iwl_led_brightness_set;
+ mvm->led.max_brightness = 1;
+
+ if (mode == IWL_LED_RF_STATE)
+ mvm->led.default_trigger =
+ ieee80211_get_radio_led_name(mvm->hw);
+
+ ret = led_classdev_register(mvm->trans->dev, &mvm->led);
+ if (ret) {
+ kfree(mvm->led.name);
+ IWL_INFO(mvm, "Failed to enable led\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+ if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE)
+ return;
+
+ led_classdev_unregister(&mvm->led);
+ kfree(mvm->led.name);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
new file mode 100644
index 00000000000..0854dc33888
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -0,0 +1,955 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "fw-api.h"
+#include "mvm.h"
+
+const u8 iwl_mvm_ac_to_tx_fifo[] = {
+ IWL_MVM_TX_FIFO_BK,
+ IWL_MVM_TX_FIFO_BE,
+ IWL_MVM_TX_FIFO_VI,
+ IWL_MVM_TX_FIFO_VO,
+};
+
+struct iwl_mvm_mac_iface_iterator_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+ unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
+ unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
+ unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_FIRST_AGG_QUEUE)];
+ enum iwl_tsf_id preferred_tsf;
+ bool found_vif;
+};
+
+static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 ac;
+
+ /* Iterator may already find the interface being added -- skip it */
+ if (vif == data->vif) {
+ data->found_vif = true;
+ return;
+ }
+
+ /* Mark the queues used by the vif */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ __set_bit(vif->hw_queue[ac], data->used_hw_queues);
+
+ if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+ __set_bit(vif->cab_queue, data->used_hw_queues);
+
+ /*
+ * Mark MAC IDs as used by clearing the available bit, and
+ * (below) mark TSFs as used if their existing use is not
+ * compatible with the new interface type.
+ * No locking or atomic bit operations are needed since the
+ * data is on the stack of the caller function.
+ */
+ __clear_bit(mvmvif->id, data->available_mac_ids);
+
+ /*
+ * The TSF is a hardware/firmware resource, there are 4 and
+ * the driver should assign and free them as needed. However,
+ * there are cases where 2 MACs should share the same TSF ID
+ * for the purpose of clock sync, an optimization to avoid
+ * clock drift causing overlapping TBTTs/DTIMs for a GO and
+ * client in the system.
+ *
+ * The firmware will decide according to the MAC type which
+ * will be the master and slave. Clients that need to sync
+ * with a remote station will be the master, and an AP or GO
+ * will be the slave.
+ *
+ * Depending on the new interface type it can be slaved to
+ * or become the master of an existing interface.
+ */
+ switch (data->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ /*
+ * The new interface is a client, so if the existing one
+ * we're iterating is an AP, the same TSF should be used to
+ * avoid drift between the new client and the existing AP;
+ * the existing AP will get drift updates from the new
+ * client context in this case.
+ */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (data->preferred_tsf == NUM_TSF_IDS &&
+ test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ /*
+ * The new interface is AP/GO, so should get drift
+ * updates from an existing client or use the same
+ * TSF as an existing GO. There's no drift between
+ * TSFs internally but if they used different TSFs
+ * then a new client MAC could update one of them
+ * and cause drift that way.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION ||
+ vif->type == NL80211_IFTYPE_AP) {
+ if (data->preferred_tsf == NUM_TSF_IDS &&
+ test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
+ }
+ break;
+ default:
+ /*
+ * For all other interface types there's no need to
+ * take drift into account. Either they're exclusive
+ * like IBSS and monitor, or we don't care much about
+ * their TSF (like P2P Device), but we won't be able
+ * to share the TSF resource.
+ */
+ break;
+ }
+
+ /*
+ * Unless we exited above, we can't share the TSF resource
+ * that the virtual interface we're iterating over is using
+ * with the new one, so clear the available bit and if this
+ * was the preferred one, reset that as well.
+ */
+ __clear_bit(mvmvif->tsf_id, data->available_tsf_ids);
+
+ if (data->preferred_tsf == mvmvif->tsf_id)
+ data->preferred_tsf = NUM_TSF_IDS;
+}
+
+/*
+ * Get the mask of the queues used by the vif
+ */
+u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 qmask, ac;
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return BIT(IWL_OFFCHANNEL_QUEUE);
+
+ qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ?
+ BIT(vif->cab_queue) : 0;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ qmask |= BIT(vif->hw_queue[ac]);
+
+ return qmask;
+}
+
+static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_iface_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 },
+ .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+ /* no preference yet */
+ .preferred_tsf = NUM_TSF_IDS,
+ .used_hw_queues = {
+ BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
+ BIT(IWL_MVM_AUX_QUEUE) |
+ BIT(IWL_MVM_CMD_QUEUE)
+ },
+ .found_vif = false,
+ };
+ u32 ac;
+ int ret;
+
+ /*
+ * Allocate a MAC ID and a TSF for this MAC, along with the queues
+ * and other resources.
+ */
+
+ /*
+ * Before the iterator, we start with all MAC IDs and TSFs available.
+ *
+ * During iteration, all MAC IDs are cleared that are in use by other
+ * virtual interfaces, and all TSF IDs are cleared that can't be used
+ * by this new virtual interface because they're used by an interface
+ * that can't share it with the new one.
+ * At the same time, we check if there's a preferred TSF in the case
+ * that we should share it with another interface.
+ */
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_iface_iterator, &data);
+
+ /*
+ * In the case we're getting here during resume, it's similar to
+ * firmware restart, and with RESUME_ALL the iterator will find
+ * the vif being added already.
+ * We don't want to reassign any IDs in either case since doing
+ * so would probably assign different IDs (as interfaces aren't
+ * necessarily added in the same order), but the old IDs were
+ * preserved anyway, so skip ID assignment for both resume and
+ * recovery.
+ */
+ if (data.found_vif)
+ return 0;
+
+ /* Therefore, in recovery, we can't get here */
+ WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+
+ mvmvif->id = find_first_bit(data.available_mac_ids,
+ NUM_MAC_INDEX_DRIVER);
+ if (mvmvif->id == NUM_MAC_INDEX_DRIVER) {
+ IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ if (data.preferred_tsf != NUM_TSF_IDS)
+ mvmvif->tsf_id = data.preferred_tsf;
+ else
+ mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+ NUM_TSF_IDS);
+ if (mvmvif->tsf_id == NUM_TSF_IDS) {
+ IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ mvmvif->color = 0;
+
+ /* No need to allocate data queues to P2P Device MAC. */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
+
+ return 0;
+ }
+
+ /* Find available queues, and allocate them to the ACs */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ u8 queue = find_first_zero_bit(data.used_hw_queues,
+ IWL_MVM_FIRST_AGG_QUEUE);
+
+ if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+ IWL_ERR(mvm, "Failed to allocate queue\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ __set_bit(queue, data.used_hw_queues);
+ vif->hw_queue[ac] = queue;
+ }
+
+ /* Allocate the CAB queue for softAP and GO interfaces */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ u8 queue = find_first_zero_bit(data.used_hw_queues,
+ IWL_MVM_FIRST_AGG_QUEUE);
+
+ if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+ IWL_ERR(mvm, "Failed to allocate cab queue\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ vif->cab_queue = queue;
+ } else {
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+ }
+
+ mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
+ mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+ INIT_LIST_HEAD(&mvmvif->time_event_data.list);
+ mvmvif->time_event_data.id = TE_MAX;
+
+ return 0;
+
+exit_fail:
+ memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
+ memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+ return ret;
+}
+
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ u32 ac;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif);
+ if (ret)
+ return ret;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ iwl_trans_ac_txq_enable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_TX_FIFO_VO);
+ break;
+ case NL80211_IFTYPE_AP:
+ iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
+ IWL_MVM_TX_FIFO_VO);
+ /* fall through */
+ default:
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ iwl_trans_ac_txq_enable(mvm->trans, vif->hw_queue[ac],
+ iwl_mvm_ac_to_tx_fifo[ac]);
+ break;
+ }
+
+ return 0;
+}
+
+void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ int ac;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE);
+ break;
+ case NL80211_IFTYPE_AP:
+ iwl_trans_txq_disable(mvm->trans, vif->cab_queue);
+ /* fall through */
+ default:
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac]);
+ }
+}
+
+static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ u8 *cck_rates, u8 *ofdm_rates)
+{
+ struct ieee80211_supported_band *sband;
+ unsigned long basic = vif->bss_conf.basic_rates;
+ int lowest_present_ofdm = 100;
+ int lowest_present_cck = 100;
+ u8 cck = 0;
+ u8 ofdm = 0;
+ int i;
+
+ sband = mvm->hw->wiphy->bands[band];
+
+ for_each_set_bit(i, &basic, BITS_PER_LONG) {
+ int hw = sband->bitrates[i].hw_value;
+ if (hw >= IWL_FIRST_OFDM_RATE) {
+ ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+ if (lowest_present_ofdm > hw)
+ lowest_present_ofdm = hw;
+ } else {
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+ cck |= BIT(hw);
+ if (lowest_present_cck > hw)
+ lowest_present_cck = hw;
+ }
+ }
+
+ /*
+ * Now we've got the basic rates as bitmaps in the ofdm and cck
+ * variables. This isn't sufficient though, as there might not
+ * be all the right rates in the bitmap. E.g. if the only basic
+ * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+ * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+ *
+ * [...] a STA responding to a received frame shall transmit
+ * its Control Response frame [...] at the highest rate in the
+ * BSSBasicRateSet parameter that is less than or equal to the
+ * rate of the immediately previous frame in the frame exchange
+ * sequence ([...]) and that is of the same modulation class
+ * ([...]) as the received frame. If no rate contained in the
+ * BSSBasicRateSet parameter meets these conditions, then the
+ * control frame sent in response to a received frame shall be
+ * transmitted at the highest mandatory rate of the PHY that is
+ * less than or equal to the rate of the received frame, and
+ * that is of the same modulation class as the received frame.
+ *
+ * As a consequence, we need to add all mandatory rates that are
+ * lower than all of the basic rates to these bitmaps.
+ */
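+ /*
+ * Worked example of the rule above: with 5.5 and 11 Mbps as the only
+ * basic rates, the checks below yield a CCK mask of {1, 2, 5.5, 11} and
+ * an OFDM mask of {6, 12, 24} Mbps (all mandatory OFDM rates, since no
+ * OFDM rate is basic).
+ */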
+
+ if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
+ ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
+ if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
+ ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
+ /* 6M already there or needed so always add */
+ ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
+
+ /*
+ * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+ * Note, however:
+ * - if no CCK rates are basic, it must be ERP since there must
+ * be some basic rates at all, so they're OFDM => ERP PHY
+ * (or we're in 5 GHz, and the cck bitmap will never be used)
+ * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+ * - if 5.5M is basic, 1M and 2M are mandatory
+ * - if 2M is basic, 1M is mandatory
+ * - if 1M is basic, that's the only valid ACK rate.
+ * As a consequence, it's not as complicated as it sounds, just add
+ * any lower rates to the ACK rate bitmap.
+ */
+ if (IWL_RATE_11M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_5M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_2M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
+ /* 1M already there or needed so always add */
+ cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
+
+ *cck_rates = cck;
+ *ofdm_rates = ofdm;
+}
+
+static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *chanctx;
+ u8 cck_ack_rates, ofdm_ack_rates;
+ int i;
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd->action = cpu_to_le32(action);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA);
+ else
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
+ break;
+ case NL80211_IFTYPE_AP:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER);
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id);
+
+ memcpy(cmd->node_addr, vif->addr, ETH_ALEN);
+ if (vif->bss_conf.bssid)
+ memcpy(cmd->bssid_addr, vif->bss_conf.bssid, ETH_ALEN);
+ else
+ eth_broadcast_addr(cmd->bssid_addr);
+
+ rcu_read_lock();
+ chanctx = rcu_dereference(vif->chanctx_conf);
+ iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
+ : IEEE80211_BAND_2GHZ,
+ &cck_ack_rates, &ofdm_ack_rates);
+ rcu_read_unlock();
+
+ cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates);
+ cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates);
+
+ cmd->cck_short_preamble =
+ cpu_to_le32(vif->bss_conf.use_short_preamble ?
+ MAC_FLG_SHORT_PREAMBLE : 0);
+ cmd->short_slot =
+ cpu_to_le32(vif->bss_conf.use_short_slot ?
+ MAC_FLG_SHORT_SLOT : 0);
+
+ for (i = 0; i < AC_NUM; i++) {
+ cmd->ac[i].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min);
+ cmd->ac[i].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max);
+ cmd->ac[i].aifsn = mvmvif->queue_params[i].aifs;
+ cmd->ac[i].edca_txop =
+ cpu_to_le16(mvmvif->queue_params[i].txop * 32);
+ cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
+ }
+
+ if (vif->bss_conf.qos)
+ cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+ if (vif->bss_conf.use_cts_prot)
+ cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT |
+ MAC_PROT_FLG_SELF_CTS_EN);
+
+ /*
+ * These two flags should probably be enabled regardless of the HT PROT
+ * fields in the HT IE, but this has not been confirmed yet.
+ */
+ if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
+ cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
+ cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_HT_PROT |
+ MAC_PROT_FLG_FAT_PROT);
+ }
+
+ cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+}
+
+static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_mac_ctx_cmd *cmd)
+{
+ int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+ sizeof(*cmd), cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
+ le32_to_cpu(cmd->action), ret);
+ return ret;
+}
+
+/*
+ * Fill the specific data for mac context of type station or p2p client
+ */
+static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_data_sta *ctxt_sta)
+{
+ /* We need the dtim_period to set the MAC as associated */
+ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
+ ctxt_sta->is_assoc = cpu_to_le32(1);
+ else
+ ctxt_sta->is_assoc = cpu_to_le32(0);
+
+ ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ ctxt_sta->bi_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+ ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period);
+ ctxt_sta->dtim_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period));
+
+ ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
+ ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION || vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ /* Fill the data specific for station mode */
+ iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ /* Fill the data specific for station mode */
+ iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta);
+
+ cmd.p2p_sta.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+ /* No other data to be filled */
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+struct iwl_mvm_go_iterator_data {
+ bool go_active;
+};
+
+static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_go_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_active)
+ data->go_active = true;
+}
+
+static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+ struct iwl_mvm_go_iterator_data data = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROMISC);
+
+ /*
+ * This flag should be set to true when the P2P Device is
+ * discoverable and there is at least one other active P2P GO. Setting
+ * this flag allows the P2P Device to be discoverable on other
+ * channels in addition to its listen channel.
+ * Note that this flag should not be set in other cases, as it opens
+ * the Rx filters on all MACs and increases the number of interrupts.
+ */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_go_iterator, &data);
+
+ cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0);
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
+ struct iwl_mac_beacon_cmd *beacon_cmd,
+ u8 *beacon, u32 frame_size)
+{
+ u32 tim_idx;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+ /* The index is relative to frame start but we start looking at the
+ * variable-length part of the beacon. */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
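+ /* Each element is encoded as <ID, length, payload>, so stepping by
+ * beacon[tim_idx + 1] + 2 skips exactly one element per iteration. */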
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx+1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+ beacon_cmd->tim_idx = cpu_to_le32(tim_idx);
+ beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]);
+ } else {
+ IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
+ }
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_host_cmd cmd = {
+ .id = BEACON_TEMPLATE_CMD,
+ .flags = CMD_ASYNC,
+ };
+ struct iwl_mac_beacon_cmd beacon_cmd = {};
+ struct ieee80211_tx_info *info;
+ u32 beacon_skb_len;
+ u32 rate;
+
+ if (WARN_ON(!beacon))
+ return -EINVAL;
+
+ beacon_skb_len = beacon->len;
+
+ /* TODO: for now the beacon template id is set to be the mac context id.
+ * Might be better to handle it as another resource ... */
+ beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+ /* Set up TX command fields */
+ beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len);
+ beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id;
+ beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ beacon_cmd.tx.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS |
+ TX_CMD_FLG_TSF);
+
+ mvm->mgmt_last_antenna_idx =
+ iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant,
+ mvm->mgmt_last_antenna_idx);
+
+ beacon_cmd.tx.rate_n_flags =
+ cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
+ RATE_MCS_ANT_POS);
+
+ info = IEEE80211_SKB_CB(beacon);
+
+ if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) {
+ rate = IWL_FIRST_OFDM_RATE;
+ } else {
+ rate = IWL_FIRST_CCK_RATE;
+ beacon_cmd.tx.rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
+ }
+ beacon_cmd.tx.rate_n_flags |=
+ cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
+
+ /* Set up TX beacon command fields */
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
+ beacon->data,
+ beacon_skb_len);
+
+ /* Submit command */
+ cmd.len[0] = sizeof(beacon_cmd);
+ cmd.data[0] = &beacon_cmd;
+ cmd.dataflags[0] = 0;
+ cmd.len[1] = beacon_skb_len;
+ cmd.data[1] = beacon->data;
+ cmd.dataflags[1] = IWL_HCMD_DFL_DUP;
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+/* The beacon template for the AP/GO context has changed and needs update */
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *beacon;
+ int ret;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP);
+
+ beacon = ieee80211_beacon_get(mvm->hw, vif);
+ if (!beacon)
+ return -ENOMEM;
+
+ ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
+ dev_kfree_skb(beacon);
+ return ret;
+}
+
+/*
+ * Fill the specific data for mac context of type AP or P2P GO
+ */
+static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_data_ap *ctxt_ap)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 curr_dev_time;
+
+ ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ ctxt_ap->bi_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+ ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period);
+ ctxt_ap->dtim_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period));
+
+ ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+ curr_dev_time = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ ctxt_ap->beacon_time = cpu_to_le32(curr_dev_time);
+
+ ctxt_ap->beacon_tsf = cpu_to_le64(curr_dev_time);
+
+ /* TODO: Assume that the beacon id == mac context id */
+ ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ /* Fill the data specific for ap mode */
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ /* Fill the data specific for GO mode */
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap);
+
+ cmd.go.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow);
+ cmd.go.opp_ps_enabled = cpu_to_le32(!!vif->bss_conf.p2p_oppps);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u32 action)
+{
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (!vif->p2p)
+ return iwl_mvm_mac_ctxt_cmd_station(mvm, vif,
+ action);
+ else
+ return iwl_mvm_mac_ctxt_cmd_p2p_client(mvm, vif,
+ action);
+ break;
+ case NL80211_IFTYPE_AP:
+ if (!vif->p2p)
+ return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
+ else
+ return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD);
+ if (ret)
+ return ret;
+
+ mvmvif->uploaded = true;
+ return 0;
+}
+
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY);
+}
+
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_ctx_cmd cmd;
+ int ret;
+
+ if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
+ return ret;
+ }
+
+ mvmvif->uploaded = false;
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
new file mode 100644
index 00000000000..e27eb972411
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -0,0 +1,1316 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-io.h"
+#include "mvm.h"
+#include "sta.h"
+#include "time-event.h"
+#include "iwl-eeprom-parse.h"
+#include "fw-api-scan.h"
+#include "iwl-phy-db.h"
+
+static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 3,
+ .limits = iwl_mvm_limits,
+ .n_limits = ARRAY_SIZE(iwl_mvm_limits),
+ },
+};
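+/*
+ * The single combination above allows up to three concurrent interfaces
+ * on one channel: one station or AP, one P2P client or P2P GO, and one
+ * P2P Device.
+ */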
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ int num_mac, ret;
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_QUEUE_CONTROL |
+ IEEE80211_HW_WANT_MONITOR_VIF |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+ IEEE80211_HW_AMPDU_AGGREGATION;
+
+ hw->queues = IWL_FIRST_AMPDU_QUEUE;
+ hw->offchannel_tx_hw_queue = IWL_OFFCHANNEL_QUEUE;
+ hw->rate_control_algorithm = "iwl-mvm-rs";
+
+ /*
+ * Enable 11w if advertised by firmware and software crypto
+ * is not enabled (as the firmware will interpret some mgmt
+ * packets, so enabling it with software crypto isn't safe)
+ */
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
+ !iwlwifi_mod_params.sw_crypto)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->sta_data_size = sizeof(struct iwl_mvm_sta);
+ hw->vif_data_size = sizeof(struct iwl_mvm_vif);
+ hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt);
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
+
+ hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwl_mvm_iface_combinations);
+
+ hw->wiphy->max_remain_on_channel_duration = 500;
+ hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+ /* Extract MAC address */
+ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
+ hw->wiphy->addresses = mvm->addresses;
+ hw->wiphy->n_addresses = 1;
+ num_mac = mvm->nvm_data->n_hw_addrs;
+ if (num_mac > 1) {
+ memcpy(mvm->addresses[1].addr, mvm->addresses[0].addr,
+ ETH_ALEN);
+ mvm->addresses[1].addr[5]++;
+ hw->wiphy->n_addresses++;
+ }
+
+ /* we create the 802.11 header and a max-length SSID element */
+ hw->wiphy->max_scan_ie_len =
+ mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+
+ if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+
+ hw->wiphy->hw_version = mvm->trans->hw_id;
+
+ if (iwlwifi_mod_params.power_save)
+ hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
+ NL80211_FEATURE_P2P_GO_OPPPS;
+
+ mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+
+#ifdef CONFIG_PM_SLEEP
+ if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ mvm->trans->ops->d3_suspend &&
+ mvm->trans->ops->d3_resume &&
+ device_can_wakeup(mvm->trans->dev)) {
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE;
+ if (!iwlwifi_mod_params.sw_crypto)
+ hw->wiphy->wowlan.flags |=
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+
+ hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
+ hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
+ hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+ }
+#endif
+
+ ret = iwl_mvm_leds_init(mvm);
+ if (ret)
+ return ret;
+
+ return ieee80211_register_hw(mvm->hw);
+}
+
+static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status)) {
+ IWL_DEBUG_DROP(mvm, "Dropping - RF KILL\n");
+ goto drop;
+ }
+
+ if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_OFFCHANNEL_QUEUE &&
+ !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+ goto drop;
+
+ if (control->sta) {
+ if (iwl_mvm_tx_skb(mvm, skb, control->sta))
+ goto drop;
+ return;
+ }
+
+ if (iwl_mvm_tx_skb_non_sta(mvm, skb))
+ goto drop;
+ return;
+ drop:
+ ieee80211_free_txskb(hw, skb);
+}
+
+static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid,
+ u16 *ssn, u8 buf_size)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
+ sta->addr, tid, action);
+
+ if (!(mvm->nvm_data->sku_cap_11n_enable))
+ return -EACCES;
+
+ mutex_lock(&mvm->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->uploaded = false;
+ mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+ /* does this make sense at all? */
+ mvmvif->color++;
+
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ mvmvif->phy_ctxt = NULL;
+}
+
+static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
+{
+ iwl_trans_stop_device(mvm->trans);
+ iwl_trans_stop_hw(mvm->trans, false);
+
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+
+ /* just in case one was running */
+ ieee80211_remain_on_channel_expired(mvm->hw);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_cleanup_iterator, mvm);
+
+ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+ memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+
+ ieee80211_wake_queues(mvm->hw);
+
+ mvm->vif_count = 0;
+}
+
+static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Clean up some internal and mac80211 state on restart */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_restart_cleanup(mvm);
+
+ ret = iwl_mvm_up(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ ret = iwl_mvm_update_quotas(mvm, NULL);
+ if (ret)
+ IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
+ ret);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ flush_work(&mvm->async_handlers_wk);
+
+ mutex_lock(&mvm->mutex);
+ /* async_handlers_wk is now blocked */
+
+ /*
+ * The work item could be running or queued if the
+ * ROC time event stops just as we get here.
+ */
+ cancel_work_sync(&mvm->roc_done_wk);
+
+ iwl_trans_stop_device(mvm->trans);
+ iwl_trans_stop_hw(mvm->trans, false);
+
+ iwl_mvm_async_handlers_purge(mvm);
+ /* async_handlers_list is empty and will stay empty: HW is stopped */
+
+ /* the fw is stopped, the aux sta is dead: clean up driver state */
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+
+ mutex_unlock(&mvm->mutex);
+
+ /*
+ * The worker might have been waiting for the mutex, let it run and
+ * discover that its list is now empty.
+ */
+ cancel_work_sync(&mvm->async_handlers_wk);
+}
+
+static void iwl_mvm_pm_disable_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+ int ret;
+
+ ret = iwl_mvm_power_disable(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to disable power management\n");
+}
+
+static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+
+ iwl_mvm_power_update_mode(mvm, vif);
+}
+
+static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ /*
+ * Not much to do here. The stack will not allow interface
+ * types or combinations that we didn't advertise, so we
+ * don't really have to check the types.
+ */
+
+ mutex_lock(&mvm->mutex);
+
+ /* Allocate resources for the MAC context, and add it to the fw */
+ ret = iwl_mvm_mac_ctxt_init(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * The AP binding flow can be done only after the beacon
+ * template is configured (which happens only in the mac80211
+ * start_ap() flow), and adding the broadcast station can happen
+ * only after the binding.
+ * In addition, since modifying the MAC before adding a bcast
+ * station is not allowed by the FW, delay adding the MAC context until
+ * the point where we can also add the bcast station.
+ * In short: there's not much we can do at this point, other than
+ * allocating resources :)
+ */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
+ qmask);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to allocate bcast sta\n");
+ goto out_release;
+ }
+
+ goto out_unlock;
+ }
+
+ /*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on single MAC.
+ * Iterate and disable PM on all active interfaces.
+ * Note: the method below does not count the new interface being added
+ * at this moment.
+ */
+ mvm->vif_count++;
+ if (mvm->vif_count > 1) {
+ IWL_DEBUG_MAC80211(mvm,
+ "Disable power on existing interfaces\n");
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_pm_disable_iterator, mvm);
+ }
+
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ goto out_release;
+
+ /*
+ * Update power state on the new interface. Admittedly, based on
+ * mac80211 logic, this power update will disable power management.
+ */
+ iwl_mvm_power_update_mode(mvm, vif);
+
+ /*
+ * P2P_DEVICE interface does not have a channel context assigned to it,
+ * so a dedicated PHY context is allocated to it and the corresponding
+ * MAC context is bound to it at this stage.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def chandef;
+
+ mvmvif->phy_ctxt = &mvm->phy_ctxt_roc;
+
+ /*
+ * The channel used here isn't relevant as it's
+ * going to be overwritten as part of the ROC flow.
+ * For now use the first channel we have.
+ */
+ chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+ ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt,
+ &chandef, 1, 1);
+ if (ret)
+ goto out_remove_mac;
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out_remove_phy;
+
+ ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
+ if (ret)
+ goto out_unbind;
+
+ /* Save a pointer to p2p device vif, so it can later be used to
+ * update the p2p device MAC when a GO is started/stopped */
+ mvm->p2p_device_vif = vif;
+ }
+
+ goto out_unlock;
+
+ out_unbind:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ out_remove_phy:
+ iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
+ out_remove_mac:
+ mvmvif->phy_ctxt = NULL;
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+ out_release:
+ /*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on single MAC.
+ * Check if only one interface remains after releasing the current
+ * one. Update power mode on the remaining interface.
+ */
+ mvm->vif_count--;
+ IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
+ mvm->vif_count);
+ if (mvm->vif_count == 1) {
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_update_iterator, mvm);
+ }
+ iwl_mvm_mac_ctxt_release(mvm, vif);
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 tfd_msk = 0, ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ tfd_msk |= BIT(vif->hw_queue[ac]);
+
+ if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+ tfd_msk |= BIT(vif->cab_queue);
+
+ if (tfd_msk) {
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
+ mutex_unlock(&mvm->mutex);
+ }
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ /*
+ * Flush the ROC worker which will flush the OFFCHANNEL queue.
+ * We assume here that all the packets sent to the OFFCHANNEL
+ * queue are sent in ROC session.
+ */
+ flush_work(&mvm->roc_done_wk);
+ } else {
+ /*
+ * By now, all the AC queues are empty. The AGG queues are
+ * empty too. We already got all the Tx responses for all the
+ * packets in the queues. The drain work may have been
+ * triggered. Flush it. This work item takes the mutex, so kill
+ * it before we take it.
+ */
+ flush_work(&mvm->sta_drained_wk);
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ /*
+ * For AP/GO interface, the tear down of the resources allocated to the
+ * interface is handled as part of the stop_ap flow (iwl_mvm_stop_ap()).
+ */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
+ goto out_release;
+ }
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ mvm->p2p_device_vif = NULL;
+ iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
+ mvmvif->phy_ctxt = NULL;
+ }
+
+ /*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on single MAC.
+ * Check if only one interface remains after removing the current
+ * one. Update power mode on the remaining interface.
+ */
+ if (mvm->vif_count)
+ mvm->vif_count--;
+ IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
+ mvm->vif_count);
+ if (mvm->vif_count == 1) {
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_update_iterator, mvm);
+ }
+
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+out_release:
+ iwl_mvm_mac_ctxt_release(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+ return 0;
+}
+
+static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
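+ /*
+ * No RX filters are configured in the firmware here; clearing
+ * *total_flags reports back to mac80211 that none of the requested
+ * FIF_* filter flags are supported by this driver.
+ */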
+ *total_flags = 0;
+}
+
+static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ /* add quota for this interface */
+ ret = iwl_mvm_update_quotas(mvm, vif);
+ if (ret) {
+ IWL_ERR(mvm, "failed to update quotas\n");
+ return;
+ }
+ } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ /* remove AP station now that the MAC is unassoc */
+ ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
+ if (ret)
+ IWL_ERR(mvm, "failed to remove AP station\n");
+ mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ /* remove quota for this interface */
+ ret = iwl_mvm_update_quotas(mvm, NULL);
+ if (ret)
+ IWL_ERR(mvm, "failed to update quotas\n");
+ }
+ } else if (changes & BSS_CHANGED_DTIM_PERIOD) {
+ /*
+ * We received a beacon _after_ association so
+ * remove the session protection.
+ */
+ iwl_mvm_remove_time_event(mvm, mvmvif,
+ &mvmvif->time_event_data);
+ } else if (changes & BSS_CHANGED_PS) {
+ /*
+ * TODO: remove this temporary code.
+ * Currently MVM FW supports power management only on single
+ * MAC. Avoid power mode update if more than one interface
+ * is active.
+ */
+ IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
+ mvm->vif_count);
+ if (mvm->vif_count == 1) {
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
+ }
+}
+
+static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Send the beacon template */
+ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /* Add the mac context */
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /* Perform the binding */
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out_remove;
+
+ mvmvif->ap_active = true;
+
+ /* Send the bcast station. At this stage the TBTT and DTIM time events
+ * are added and applied to the scheduler */
+ ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
+ if (ret)
+ goto out_unbind;
+
+ ret = iwl_mvm_update_quotas(mvm, vif);
+ if (ret)
+ goto out_rm_bcast;
+
+ /* Need to update the P2P Device MAC */
+ if (vif->p2p && mvm->p2p_device_vif)
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+
+ mutex_unlock(&mvm->mutex);
+ return 0;
+
+out_rm_bcast:
+ iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
+out_unbind:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+out_remove:
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mutex_lock(&mvm->mutex);
+
+ mvmvif->ap_active = false;
+
+ /* Need to update the P2P Device MAC */
+ if (vif->p2p && mvm->p2p_device_vif)
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+
+ iwl_mvm_update_quotas(mvm, NULL);
+ iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ /* Need to send a new beacon template to the FW */
+ if (changes & BSS_CHANGED_BEACON) {
+ if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
+ IWL_WARN(mvm, "Failed updating beacon data\n");
+ }
+}
+
+static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
+ break;
+ case NL80211_IFTYPE_AP:
+ iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes);
+ break;
+ default:
+ /* shouldn't happen */
+ WARN_ON_ONCE(1);
+ }
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+ ret = iwl_mvm_scan_request(mvm, vif, req);
+ else
+ ret = -EBUSY;
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+
+ iwl_mvm_cancel_scan(mvm);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void
+iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+
+ /* TODO: how do we tell the fw to send frames for a specific TID */
+
+ /*
+ * The fw will send an EOSP notification when the last frame is
+ * transmitted.
+ */
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, mvmsta->sta_id, reason,
+ num_frames);
+}
+
+static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ if (atomic_read(&mvmsta->pending_frames) > 0)
+ ieee80211_sta_block_awake(hw, sta, true);
+ /*
+ * The fw updates the STA to be asleep. Tx packets on the Tx
+ * queues to this station will not be transmitted. The fw will
+ * send a Tx response with TX_STATUS_FAIL_DEST_PS.
+ */
+ break;
+ case STA_NOTIFY_AWAKE:
+ if (WARN_ON(mvmsta->sta_id == IWL_INVALID_STATION))
+ break;
+ iwl_mvm_sta_modify_ps_wake(mvm, mvmsta->sta_id);
+ break;
+ default:
+ break;
+ }
+}
+
+static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
+ sta->addr, old_state, new_state);
+
+ /* this would be a mac80211 bug ... but don't crash */
+ if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ /* if a STA is being removed, reuse its ID */
+ flush_work(&mvm->sta_drained_wk);
+
+ mutex_lock(&mvm->mutex);
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ ret = iwl_mvm_add_sta(mvm, vif, sta);
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_AUTH) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = iwl_mvm_update_sta(mvm, vif, sta);
+ if (ret == 0)
+ iwl_mvm_rs_rate_init(mvm, sta,
+ mvmvif->phy_ctxt->channel->band);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_NONE) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ ret = iwl_mvm_rm_sta(mvm, vif, sta);
+ } else {
+ ret = -EIO;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mvm->rts_threshold = value;
+
+ return 0;
+}
+
+static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->queue_params[ac] = *params;
+
+ /*
+ * No need to update right away, we'll get BSS_CHANGED_QOS.
+ * The exception is the P2P_DEVICE interface, which needs an
+ * immediate update.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+ return ret;
+ }
+ return 0;
+}
+
+static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
+ 200 + vif->bss_conf.beacon_int);
+ u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
+ 100 + vif->bss_conf.beacon_int);
+
+ if (WARN_ON_ONCE(vif->bss_conf.assoc))
+ return;
+
+ mutex_lock(&mvm->mutex);
+ /* Try really hard to protect the session and hear a beacon */
+ iwl_mvm_protect_session(mvm, vif, duration, min_duration);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ if (iwlwifi_mod_params.sw_crypto) {
+ IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /* fall-through */
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ WARN_ON_ONCE(!(hw->flags & IEEE80211_HW_MFP_CAPABLE));
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /*
+ * Support for TX only, at least for now, so accept
+ * the key and do nothing else. Then mac80211 will
+ * pass it for TX but we don't have to use it for RX.
+ */
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ switch (cmd) {
+ case SET_KEY:
+ IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
+ if (ret) {
+ IWL_WARN(mvm, "set key failed\n");
+ /*
+ * can't add key for RX, but we don't need it
+ * in the device for TX so still return 0
+ */
+ ret = 0;
+ }
+
+ break;
+ case DISABLE_KEY:
+ IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
+ ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
+}
+
+
+static int iwl_mvm_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel,
+ int duration)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct cfg80211_chan_def chandef;
+ int ret;
+
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+ IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type);
+ return -EINVAL;
+ }
+
+ IWL_DEBUG_MAC80211(mvm, "enter (%d, %d)\n", channel->hw_value,
+ duration);
+
+ mutex_lock(&mvm->mutex);
+
+ cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+ ret = iwl_mvm_phy_ctxt_changed(mvm, &mvm->phy_ctxt_roc,
+        &chandef, 1, 1);
+ if (ret)
+  goto out_unlock;
+
+ /* Schedule the time events */
+ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration);
+
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+ IWL_DEBUG_MAC80211(mvm, "leave\n");
+
+ return ret;
+}
+
+static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ IWL_DEBUG_MAC80211(mvm, "enter\n");
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_stop_p2p_roc(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ IWL_DEBUG_MAC80211(mvm, "leave\n");
+ return 0;
+}
+
+static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_MAC80211(mvm, "Add PHY context\n");
+ ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, &ctx->def,
+ ctx->rx_chains_static,
+ ctx->rx_chains_dynamic);
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_phy_ctxt_remove(mvm, phy_ctxt);
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+ ctx->rx_chains_static,
+ ctx->rx_chains_dynamic);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_phy_ctxt *phyctx = (void *)ctx->drv_priv;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmvif->phy_ctxt = phyctx;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ /*
+ * The AP binding flow is handled as part of the start_ap flow
+ * (iwl_mvm_start_ap() in this file).
+ */
+ ret = 0;
+ goto out_unlock;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MONITOR:
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * Setting the quota at this stage is only required for monitor
+ * interfaces. For the other types, the bss_info changed flow
+ * will handle quota settings.
+ */
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ret = iwl_mvm_update_quotas(mvm, vif);
+ if (ret)
+ goto out_remove_binding;
+ }
+
+ goto out_unlock;
+
+ out_remove_binding:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+ if (ret)
+ mvmvif->phy_ctxt = NULL;
+ return ret;
+}
+
+static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mutex_lock(&mvm->mutex);
+
+ iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ goto out_unlock;
+
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ switch (vif->type) {
+ case NL80211_IFTYPE_MONITOR:
+ iwl_mvm_update_quotas(mvm, vif);
+ break;
+ default:
+ break;
+ }
+
+out_unlock:
+ mvmvif->phy_ctxt = NULL;
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ bool set)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+ if (!mvm_sta || !mvm_sta->vif) {
+ IWL_ERR(mvm, "Station is not associated to a vif\n");
+ return -EINVAL;
+ }
+
+ return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
+}
+
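+/*
+ * mac80211 callbacks implemented by this op_mode. The table is exported
+ * via mvm.h and presumably handed to mac80211 in
+ * iwl_mvm_mac_setup_register(), along the usual lines of the sketch below
+ * (priv_size is only an illustrative placeholder, not a symbol from this
+ * patch):
+ *
+ *	hw = ieee80211_alloc_hw(priv_size, &iwl_mvm_hw_ops);
+ *	...
+ *	ieee80211_register_hw(hw);
+ */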
+struct ieee80211_ops iwl_mvm_hw_ops = {
+ .tx = iwl_mvm_mac_tx,
+ .ampdu_action = iwl_mvm_mac_ampdu_action,
+ .start = iwl_mvm_mac_start,
+ .restart_complete = iwl_mvm_mac_restart_complete,
+ .stop = iwl_mvm_mac_stop,
+ .add_interface = iwl_mvm_mac_add_interface,
+ .remove_interface = iwl_mvm_mac_remove_interface,
+ .config = iwl_mvm_mac_config,
+ .configure_filter = iwl_mvm_configure_filter,
+ .bss_info_changed = iwl_mvm_bss_info_changed,
+ .hw_scan = iwl_mvm_mac_hw_scan,
+ .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+ .sta_state = iwl_mvm_mac_sta_state,
+ .sta_notify = iwl_mvm_mac_sta_notify,
+ .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+ .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+ .conf_tx = iwl_mvm_mac_conf_tx,
+ .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .set_key = iwl_mvm_mac_set_key,
+ .update_tkip_key = iwl_mvm_mac_update_tkip_key,
+ .remain_on_channel = iwl_mvm_roc,
+ .cancel_remain_on_channel = iwl_mvm_cancel_roc,
+
+ .add_chanctx = iwl_mvm_add_chanctx,
+ .remove_chanctx = iwl_mvm_remove_chanctx,
+ .change_chanctx = iwl_mvm_change_chanctx,
+ .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
+ .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
+
+ .start_ap = iwl_mvm_start_ap,
+ .stop_ap = iwl_mvm_stop_ap,
+
+ .set_tim = iwl_mvm_set_tim,
+
+#ifdef CONFIG_PM_SLEEP
+ /* look at d3.c */
+ .suspend = iwl_mvm_suspend,
+ .resume = iwl_mvm_resume,
+ .set_wakeup = iwl_mvm_set_wakeup,
+ .set_rekey_data = iwl_mvm_set_rekey_data,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
+#endif
+ .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
+#endif
+};
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
new file mode 100644
index 00000000000..4e339ccfa80
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -0,0 +1,500 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_H__
+#define __IWL_MVM_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/leds.h>
+#include <linux/in6.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+#include "iwl-notif-wait.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-test.h"
+#include "iwl-trans.h"
+#include "sta.h"
+#include "fw-api.h"
+
+#define IWL_INVALID_MAC80211_QUEUE 0xff
+#define IWL_MVM_MAX_ADDRESSES 2
+#define IWL_RSSI_OFFSET 44
+
+enum iwl_mvm_tx_fifo {
+ IWL_MVM_TX_FIFO_BK = 0,
+ IWL_MVM_TX_FIFO_BE,
+ IWL_MVM_TX_FIFO_VI,
+ IWL_MVM_TX_FIFO_VO,
+};
+
+/* Placeholder */
+#define IWL_OFFCHANNEL_QUEUE 8
+#define IWL_FIRST_AMPDU_QUEUE 11
+
+extern struct ieee80211_ops iwl_mvm_hw_ops;
+/**
+ * struct iwl_mvm_mod_params - module parameters for iwlmvm
+ * @init_dbg: if true, the NIC won't be stopped if the INIT fw asserts.
+ * We still register with mac80211 so that testmode keeps working; the
+ * NIC must not be brought up after the INIT fw has asserted. This is
+ * useful for debugging the INIT fw with proprietary tools over testmode.
+ * @power_scheme: 1 - CAM (Continuously Active Mode), 2 - BPS (Balanced
+ * Power Save, default), 3 - LP (Low Power)
+ */
+struct iwl_mvm_mod_params {
+ bool init_dbg;
+ int power_scheme;
+};
+extern struct iwl_mvm_mod_params iwlmvm_mod_params;
+
+struct iwl_mvm_phy_ctxt {
+ u16 id;
+ u16 color;
+
+ /*
+ * TODO: This should probably be removed. Currently here only for rate
+ * scaling algorithm
+ */
+ struct ieee80211_channel *channel;
+};
+
+struct iwl_mvm_time_event_data {
+ struct ieee80211_vif *vif;
+ struct list_head list;
+ unsigned long end_jiffies;
+ u32 duration;
+ bool running;
+ u32 uid;
+
+ /*
+ * The access to the 'id' field must be done when the
+ * mvm->time_event_lock is held, as its value is used to indicate
+ * if the te is in the time event list or not (when id == TE_MAX)
+ */
+ u32 id;
+};
+
+ /* Power management */
+
+/**
+ * enum iwl_power_scheme
+ * @IWL_POWER_SCHEME_CAM - Continuously Active Mode
+ * @IWL_POWER_SCHEME_BPS - Balanced Power Save (default)
+ * @IWL_POWER_SCHEME_LP - Low Power
+ */
+enum iwl_power_scheme {
+ IWL_POWER_SCHEME_CAM = 1,
+ IWL_POWER_SCHEME_BPS,
+ IWL_POWER_SCHEME_LP
+};
+
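+/* longest listen interval supported, in beacon intervals */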
+#define IWL_CONN_MAX_LISTEN_INTERVAL 70
+
+/**
+ * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
+ * @id: between 0 and 3
+ * @color: to solve races upon MAC addition and removal
+ * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
+ * @uploaded: indicates the MAC context has been added to the device
+ * @ap_active: indicates that ap context is configured, and that the interface
+ * should get quota etc.
+ * @queue_params: QoS params for this MAC
+ * @bcast_sta: station used for broadcast packets. Used by the following
+ * vifs: P2P_DEVICE, GO and AP.
+ * @time_event_data: data of the (at most one) time event scheduled for
+ * this MAC context (session protection or ROC)
+ */
+struct iwl_mvm_vif {
+ u16 id;
+ u16 color;
+ u8 ap_sta_id;
+
+ bool uploaded;
+ bool ap_active;
+
+ enum iwl_tsf_id tsf_id;
+
+ /*
+ * QoS data from mac80211, need to store this here
+ * as mac80211 has a separate callback but we need
+ * to have the data for the MAC context
+ */
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ struct iwl_mvm_time_event_data time_event_data;
+
+ struct iwl_mvm_int_sta bcast_sta;
+
+ /*
+ * Assigned while mac80211 has the interface in a channel context,
+ * or, for P2P Device, while it exists.
+ */
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+
+#ifdef CONFIG_PM_SLEEP
+ /* WoWLAN GTK rekey data */
+ struct {
+ u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+ __le64 replay_ctr;
+ bool valid;
+ } rekey_data;
+
+ int tx_key_idx;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* IPv6 addresses for WoWLAN */
+ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS];
+ int num_target_ipv6_addrs;
+#endif
+#endif
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct dentry *dbgfs_dir;
+ void *dbgfs_data;
+#endif
+};
+
+static inline struct iwl_mvm_vif *
+iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
+{
+ return (void *)vif->drv_priv;
+}
+
+enum iwl_mvm_status {
+ IWL_MVM_STATUS_HW_RFKILL,
+ IWL_MVM_STATUS_ROC_RUNNING,
+ IWL_MVM_STATUS_IN_HW_RESTART,
+};
+
+enum iwl_scan_status {
+ IWL_MVM_SCAN_NONE,
+ IWL_MVM_SCAN_OS,
+};
+
+/**
+ * struct iwl_nvm_section - describes an NVM section in memory.
+ *
+ * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
+ * and saved for later use by the driver. Not all NVM sections are saved
+ * this way, only the needed ones.
+ */
+struct iwl_nvm_section {
+ u16 length;
+ const u8 *data;
+};
+
+struct iwl_mvm {
+ /* for logger access */
+ struct device *dev;
+
+ struct iwl_trans *trans;
+ const struct iwl_fw *fw;
+ const struct iwl_cfg *cfg;
+ struct iwl_phy_db *phy_db;
+ struct ieee80211_hw *hw;
+
+ /* for protecting access to iwl_mvm */
+ struct mutex mutex;
+ struct list_head async_handlers_list;
+ spinlock_t async_handlers_lock;
+ struct work_struct async_handlers_wk;
+
+ struct work_struct roc_done_wk;
+
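+ /* bitmap of IWL_MVM_STATUS_* bits, accessed with the atomic bitops */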
+ unsigned long status;
+
+ enum iwl_ucode_type cur_ucode;
+ bool ucode_loaded;
+ bool init_ucode_run;
+ u32 error_event_table;
+ u32 log_event_table;
+
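+ /* running reference counter for received A-MPDUs */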
+ u32 ampdu_ref;
+
+ struct iwl_notif_wait_data notif_wait;
+
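+ /*
+ * HW queue flow control: bitmap of stopped transport queues, the
+ * HW-to-mac80211 queue mapping and per-queue stop reference counts.
+ */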
+ unsigned long transport_queue_stop;
+ u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
+ atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
+
+ struct iwl_nvm_data *nvm_data;
+ /* eeprom blob for debugfs/testmode */
+ u8 *eeprom_blob;
+ size_t eeprom_blob_size;
+ /* NVM sections for 7000 family */
+ struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS];
+
+ /* EEPROM MAC addresses */
+ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
+
+ /* data related to data path */
+ struct iwl_rx_phy_info last_phy_info;
+ struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
+ struct work_struct sta_drained_wk;
+ unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
+
+ /* configured by mac80211 */
+ u32 rts_threshold;
+
+ /* Scan status, cmd (pre-allocated) and auxiliary station */
+ enum iwl_scan_status scan_status;
+ struct iwl_scan_cmd *scan_cmd;
+
+ /* Internal station */
+ struct iwl_mvm_int_sta aux_sta;
+
+ u8 scan_last_antenna_idx; /* to toggle TX between antennas */
+ u8 mgmt_last_antenna_idx;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
+ bool prevent_power_down_d3;
+#endif
+
+ struct iwl_mvm_phy_ctxt phy_ctxt_roc;
+
+ struct list_head time_event_list;
+ spinlock_t time_event_lock;
+
+ /*
+ * A bitmap of the key indexes in use. The firmware can hold at most
+ * 16 keys; the bitmap size reflects that limit.
+ */
+ unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+ u8 vif_count;
+
+ struct led_classdev led;
+
+ struct ieee80211_vif *p2p_device_vif;
+};
+
+/* Extract MVM priv from op_mode and _hw */
+#define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \
+ ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)
+
+#define IWL_MAC80211_GET_MVM(_hw) \
+ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
+
+extern const u8 iwl_mvm_ac_to_tx_fifo[];
+
+struct iwl_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
+};
+
+/******************
+ * MVM Methods
+ ******************/
+/* uCode */
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
+
+/* Utils */
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+ enum ieee80211_band band);
+u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+u8 first_antenna(u8 mask);
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
+
+/* Tx / Host Commands */
+int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_host_cmd *cmd);
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+ u32 flags, u16 len, const void *data);
+int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
+ struct iwl_host_cmd *cmd,
+ u32 *status);
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
+ u16 len, const void *data,
+ u32 *status);
+int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status);
+#else
+static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
+#endif
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
+
+/* Statistics */
+int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+/* NVM */
+int iwl_nvm_init(struct iwl_mvm *mvm);
+
+int iwl_mvm_up(struct iwl_mvm *mvm);
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+
+/*
+ * FW notifications / CMD responses handlers
+ * Convention: iwl_mvm_rx_<NAME OF THE CMD>
+ */
+int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+/* MVM PHY */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic);
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic);
+void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt);
+
+/* MAC (virtual interface) programming */
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
+/* Bindings */
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
+/* Quota management */
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif);
+
+/* Scanning */
+int iwl_mvm_scan_request(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
+
+/* MVM debugfs */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
+int iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct dentry *dbgfs_dir);
+void iwl_power_get_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_powertable_cmd *cmd);
+#else
+static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
+ struct dentry *dbgfs_dir)
+{
+ return 0;
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+/* rate scaling */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
+ u8 flags, bool init);
+
+/* power management */
+int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
+int iwl_mvm_leds_init(struct iwl_mvm *mvm);
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
+
+/* D3 (WoWLAN, NetDetect) */
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+int iwl_mvm_resume(struct ieee80211_hw *hw);
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data);
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev);
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx);
+
+#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
new file mode 100644
index 00000000000..20016bcbdea
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -0,0 +1,311 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-trans.h"
+#include "mvm.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-eeprom-read.h"
+#include "iwl-nvm-parse.h"
+
+/* list of NVM sections we are allowed/need to read */
+static const int nvm_to_read[] = {
+ NVM_SECTION_TYPE_HW,
+ NVM_SECTION_TYPE_SW,
+ NVM_SECTION_TYPE_CALIBRATION,
+ NVM_SECTION_TYPE_PRODUCTION,
+};
+
+/* used to simplify the shared operations on NVM_ACCESS_CMD versions */
+union iwl_nvm_access_cmd {
+ struct iwl_nvm_access_cmd_ver1 ver1;
+ struct iwl_nvm_access_cmd_ver2 ver2;
+};
+union iwl_nvm_access_resp {
+ struct iwl_nvm_access_resp_ver1 ver1;
+ struct iwl_nvm_access_resp_ver2 ver2;
+};
+
+static inline void iwl_nvm_fill_read_ver1(struct iwl_nvm_access_cmd_ver1 *cmd,
+ u16 offset, u16 length)
+{
+ cmd->offset = cpu_to_le16(offset);
+ cmd->length = cpu_to_le16(length);
+ cmd->cache_refresh = 1;
+}
+
+static inline void iwl_nvm_fill_read_ver2(struct iwl_nvm_access_cmd_ver2 *cmd,
+ u16 offset, u16 length, u16 section)
+{
+ cmd->offset = cpu_to_le16(offset);
+ cmd->length = cpu_to_le16(length);
+ cmd->type = cpu_to_le16(section);
+}
+
+static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
+ u16 offset, u16 length, u8 *data)
+{
+ union iwl_nvm_access_cmd nvm_access_cmd;
+ union iwl_nvm_access_resp *nvm_resp;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = NVM_ACCESS_CMD,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
+ .data = { &nvm_access_cmd, },
+ };
+ int ret, bytes_read, offset_read;
+ u8 *resp_data;
+
+ memset(&nvm_access_cmd, 0, sizeof(nvm_access_cmd));
+
+ /* TODO: not sure family should be the decider, maybe FW version? */
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_nvm_fill_read_ver2(&(nvm_access_cmd.ver2),
+ offset, length, section);
+ cmd.len[0] = sizeof(struct iwl_nvm_access_cmd_ver2);
+ } else {
+ iwl_nvm_fill_read_ver1(&(nvm_access_cmd.ver1),
+ offset, length);
+ cmd.len[0] = sizeof(struct iwl_nvm_access_cmd_ver1);
+ }
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n",
+ pkt->hdr.flags);
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* Extract NVM response */
+ nvm_resp = (void *)pkt->data;
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ ret = le16_to_cpu(nvm_resp->ver2.status);
+ bytes_read = le16_to_cpu(nvm_resp->ver2.length);
+ offset_read = le16_to_cpu(nvm_resp->ver2.offset);
+ resp_data = nvm_resp->ver2.data;
+ } else {
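+ /*
+ * ver1 has no explicit status field; a non-positive length in
+ * the response is treated as a failure.
+ */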
+ ret = le16_to_cpu(nvm_resp->ver1.length) <= 0;
+ bytes_read = le16_to_cpu(nvm_resp->ver1.length);
+ offset_read = le16_to_cpu(nvm_resp->ver1.offset);
+ resp_data = nvm_resp->ver1.data;
+ }
+ if (ret) {
+ IWL_ERR(mvm,
+ "NVM access command failed with status %d (device: %s)\n",
+ ret, mvm->cfg->name);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (offset_read != offset) {
+ IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
+ offset_read);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Copy the data read from the NVM into the caller's buffer */
+ memcpy(data + offset, resp_data, bytes_read);
+ ret = bytes_read;
+
+exit:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+/*
+ * Reads an NVM section completely.
+ * NICs prior to the 7000 family don't have a real NVM, so we just read
+ * section 0, which is the EEPROM. Because the uCode doesn't bound EEPROM
+ * reads by itself, we need to manually check in this case that we don't
+ * overflow and try to read more than the EEPROM size.
+ * For 7000 family NICs, we supply the maximal size we can read, and
+ * the uCode fills the response with as much data as it can,
+ * without overflowing, so no check is needed.
+ */
+static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
+ u8 *data)
+{
+ u16 length, offset = 0;
+ int ret;
+ bool old_eeprom = mvm->cfg->device_family != IWL_DEVICE_FAMILY_7000;
+
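+ /*
+ * Bound each chunk so that, together with the command and the RX
+ * packet header, it fits in a single 4K (or 8K) RX buffer.
+ */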
+ length = (iwlwifi_mod_params.amsdu_size_8K ? (8 * 1024) : (4 * 1024))
+ - sizeof(union iwl_nvm_access_cmd)
+ - sizeof(struct iwl_rx_packet);
+ /*
+ * If length is greater than the EEPROM size, truncate it, since the
+ * uCode doesn't check this by itself; the loop below exits once the
+ * EEPROM end is reached.
+ */
+ if (old_eeprom && length > mvm->cfg->base_params->eeprom_size)
+ length = mvm->cfg->base_params->eeprom_size;
+ ret = length;
+
+ /* Read the NVM until exhausted (reading less than requested) */
+ while (ret == length) {
+ ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
+ if (ret < 0) {
+ IWL_ERR(mvm,
+ "Cannot read NVM from section %d offset %d, length %d\n",
+ section, offset, length);
+ return ret;
+ }
+ offset += ret;
+ if (old_eeprom && offset == mvm->cfg->base_params->eeprom_size)
+ break;
+ }
+
+ IWL_INFO(mvm, "NVM section %d read completed\n", section);
+ return offset;
+}
+
+static struct iwl_nvm_data *
+iwl_parse_nvm_sections(struct iwl_mvm *mvm)
+{
+ struct iwl_nvm_section *sections = mvm->nvm_sections;
+ const __le16 *hw, *sw, *calib;
+
+ /* Checking for required sections */
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+ !mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) {
+ IWL_ERR(mvm, "Can't parse empty NVM sections\n");
+ return NULL;
+ }
+
+ if (WARN_ON(!mvm->cfg))
+ return NULL;
+
+ hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
+ sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
+ calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
+ return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib);
+}
+
+int iwl_nvm_init(struct iwl_mvm *mvm)
+{
+ int ret, i, section;
+ u8 *nvm_buffer, *temp;
+
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ /* TODO: find correct NVM max size for a section */
+ nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+ GFP_KERNEL);
+ if (!nvm_buffer)
+ return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+ section = nvm_to_read[i];
+ /* read into a scratch buffer; a private copy is kept below */
+ ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
+ if (ret < 0)
+ break;
+ temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
+ break;
+ }
+ mvm->nvm_sections[section].data = temp;
+ mvm->nvm_sections[section].length = ret;
+ }
+ kfree(nvm_buffer);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* allocate eeprom */
+ mvm->eeprom_blob_size = mvm->cfg->base_params->eeprom_size;
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM size = %zd\n",
+ mvm->eeprom_blob_size);
+ mvm->eeprom_blob = kzalloc(mvm->eeprom_blob_size, GFP_KERNEL);
+ if (!mvm->eeprom_blob)
+ return -ENOMEM;
+
+ ret = iwl_nvm_read_section(mvm, 0, mvm->eeprom_blob);
+ if (ret != mvm->eeprom_blob_size) {
+ IWL_ERR(mvm, "Read partial NVM %d/%zd\n",
+ ret, mvm->eeprom_blob_size);
+ kfree(mvm->eeprom_blob);
+ mvm->eeprom_blob = NULL;
+ return -EINVAL;
+ }
+ }
+
+ ret = 0;
+ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ mvm->nvm_data = iwl_parse_nvm_sections(mvm);
+ else
+ mvm->nvm_data =
+ iwl_parse_eeprom_data(mvm->trans->dev,
+ mvm->cfg,
+ mvm->eeprom_blob,
+ mvm->eeprom_blob_size);
+
+ if (!mvm->nvm_data) {
+ kfree(mvm->eeprom_blob);
+ mvm->eeprom_blob = NULL;
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
new file mode 100644
index 00000000000..aa59adf87db
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -0,0 +1,682 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/module.h>
+#include <net/mac80211.h>
+
+#include "iwl-notif-wait.h"
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "iwl-fw.h"
+#include "iwl-debug.h"
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-phy-db.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "rs.h"
+#include "fw-api-scan.h"
+#include "time-event.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
+
+#define DRV_VERSION IWLWIFI_VERSION
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static const struct iwl_op_mode_ops iwl_mvm_ops;
+
+struct iwl_mvm_mod_params iwlmvm_mod_params = {
+ .power_scheme = IWL_POWER_SCHEME_BPS,
+ /* rest of fields are 0 by default */
+};
+
+module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
+MODULE_PARM_DESC(init_dbg,
+ "set to true to debug an ASSERT in INIT fw (default: false");
+module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
+MODULE_PARM_DESC(power_scheme,
+ "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
+
+/*
+ * module init and exit functions
+ */
+static int __init iwl_mvm_init(void)
+{
+ int ret;
+
+ ret = iwl_mvm_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
+
+ if (ret) {
+ pr_err("Unable to register MVM op_mode: %d\n", ret);
+ iwl_mvm_rate_control_unregister();
+ }
+
+ return ret;
+}
+module_init(iwl_mvm_init);
+
+static void __exit iwl_mvm_exit(void)
+{
+ iwl_opmode_deregister("iwlmvm");
+ iwl_mvm_rate_control_unregister();
+}
+module_exit(iwl_mvm_exit);
+
+static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+ u32 reg_val = 0;
+
+ /*
+ * We can't upload the correct value to the INIT image
+ * as we don't have nvm_data by that time.
+ *
+ * TODO: Figure out what we should do here
+ */
+ if (mvm->nvm_data) {
+ radio_cfg_type = mvm->nvm_data->radio_cfg_type;
+ radio_cfg_step = mvm->nvm_data->radio_cfg_step;
+ radio_cfg_dash = mvm->nvm_data->radio_cfg_dash;
+ } else {
+ radio_cfg_type = 0;
+ radio_cfg_step = 0;
+ radio_cfg_dash = 0;
+ }
+
+ /* SKU control */
+ reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
+ reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+
+ /* radio configuration */
+ reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+ reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+ reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+ WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
+ ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
+
+ /* silicon bits */
+ reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+ reg_val |= CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
+
+ iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
+ reg_val);
+
+ IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
+ radio_cfg_step, radio_cfg_dash);
+
+ /*
+ * W/A : NIC is stuck in a reset state after Early PCIe power off
+ * (PCIe power is lost before PERST# is asserted), causing ME FW
+ * to lose ownership and not be able to obtain it back.
+ */
+ iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+}
+
+struct iwl_rx_handlers {
+ u8 cmd_id;
+ bool async;
+ int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+};
+
+#define RX_HANDLER(_cmd_id, _fn, _async) \
+ { .cmd_id = _cmd_id , .fn = _fn , .async = _async }
+
+/*
+ * Handlers for fw notifications
+ * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME).
+ * This list should be in order of frequency for performance purposes.
+ *
+ * The handler can be SYNC - this means that it will be called in the Rx path
+ * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
+ * only in this case!), it should be set as ASYNC. In that case, it will be
+ * called from a worker with mvm->mutex held.
+ */
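+/*
+ * Illustrative (hypothetical) entry: a handler that needs mvm->mutex would
+ * be registered with the async flag set, e.g.
+ *   RX_HANDLER(SOME_NOTIFICATION, iwl_mvm_rx_some_notif, true)
+ * and would then run from iwl_mvm_async_handlers_wk() instead of directly
+ * in the Rx path.
+ */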
+static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
+ RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
+ RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
+ RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
+ RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
+ RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
+
+ RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
+ RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+
+ RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
+ RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
+
+ RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
+};
+#undef RX_HANDLER
+#define CMD(x) [x] = #x
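+/*
+ * CMD(TX_CMD), for example, expands to [TX_CMD] = "TX_CMD", so the table
+ * below maps each command id to its name for debug output.
+ */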
+
+static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
+ CMD(MVM_ALIVE),
+ CMD(REPLY_ERROR),
+ CMD(INIT_COMPLETE_NOTIF),
+ CMD(PHY_CONTEXT_CMD),
+ CMD(MGMT_MCAST_KEY),
+ CMD(TX_CMD),
+ CMD(TXPATH_FLUSH),
+ CMD(MAC_CONTEXT_CMD),
+ CMD(TIME_EVENT_CMD),
+ CMD(TIME_EVENT_NOTIFICATION),
+ CMD(BINDING_CONTEXT_CMD),
+ CMD(TIME_QUOTA_CMD),
+ CMD(RADIO_VERSION_NOTIFICATION),
+ CMD(SCAN_REQUEST_CMD),
+ CMD(SCAN_ABORT_CMD),
+ CMD(SCAN_START_NOTIFICATION),
+ CMD(SCAN_RESULTS_NOTIFICATION),
+ CMD(SCAN_COMPLETE_NOTIFICATION),
+ CMD(NVM_ACCESS_CMD),
+ CMD(PHY_CONFIGURATION_CMD),
+ CMD(CALIB_RES_NOTIF_PHY_DB),
+ CMD(SET_CALIB_DEFAULT_CMD),
+ CMD(CALIBRATION_COMPLETE_NOTIFICATION),
+ CMD(ADD_STA),
+ CMD(REMOVE_STA),
+ CMD(LQ_CMD),
+ CMD(SCAN_OFFLOAD_CONFIG_CMD),
+ CMD(SCAN_OFFLOAD_REQUEST_CMD),
+ CMD(SCAN_OFFLOAD_ABORT_CMD),
+ CMD(SCAN_OFFLOAD_COMPLETE),
+ CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
+ CMD(POWER_TABLE_CMD),
+ CMD(WEP_KEY),
+ CMD(REPLY_RX_PHY_CMD),
+ CMD(REPLY_RX_MPDU_CMD),
+ CMD(BEACON_TEMPLATE_CMD),
+ CMD(STATISTICS_NOTIFICATION),
+ CMD(TX_ANT_CONFIGURATION_CMD),
+ CMD(D3_CONFIG_CMD),
+ CMD(PROT_OFFLOAD_CONFIG_CMD),
+ CMD(OFFLOADS_QUERY_CMD),
+ CMD(REMOTE_WAKE_CONFIG_CMD),
+ CMD(WOWLAN_PATTERNS),
+ CMD(WOWLAN_CONFIGURATION),
+ CMD(WOWLAN_TSC_RSC_PARAM),
+ CMD(WOWLAN_TKIP_PARAM),
+ CMD(WOWLAN_KEK_KCK_MATERIAL),
+ CMD(WOWLAN_GET_STATUSES),
+ CMD(WOWLAN_TX_POWER_PER_DB),
+ CMD(NET_DETECT_CONFIG_CMD),
+ CMD(NET_DETECT_PROFILES_QUERY_CMD),
+ CMD(NET_DETECT_PROFILES_CMD),
+ CMD(NET_DETECT_HOTSPOTS_CMD),
+ CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
+};
+#undef CMD
+
+/* this forward declaration avoids having to export the function */
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+
+static struct iwl_op_mode *
+iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw, struct dentry *dbgfs_dir)
+{
+ struct ieee80211_hw *hw;
+ struct iwl_op_mode *op_mode;
+ struct iwl_mvm *mvm;
+ struct iwl_trans_config trans_cfg = {};
+ static const u8 no_reclaim_cmds[] = {
+ TX_CMD,
+ };
+ int err, scan_size;
+
+ switch (cfg->device_family) {
+ case IWL_DEVICE_FAMILY_6030:
+ case IWL_DEVICE_FAMILY_6005:
+ case IWL_DEVICE_FAMILY_7000:
+ break;
+ default:
+ IWL_ERR(trans, "Trying to load mvm on an unsupported device\n");
+ return NULL;
+ }
+
+ /********************************
+ * 1. Allocating and configuring HW data
+ ********************************/
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
+ sizeof(struct iwl_mvm),
+ &iwl_mvm_hw_ops);
+ if (!hw)
+ return NULL;
+
+ op_mode = hw->priv;
+ op_mode->ops = &iwl_mvm_ops;
+ op_mode->trans = trans;
+
+ mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ mvm->dev = trans->dev;
+ mvm->trans = trans;
+ mvm->cfg = cfg;
+ mvm->fw = fw;
+ mvm->hw = hw;
+
+ mutex_init(&mvm->mutex);
+ spin_lock_init(&mvm->async_handlers_lock);
+ INIT_LIST_HEAD(&mvm->time_event_list);
+ INIT_LIST_HEAD(&mvm->async_handlers_list);
+ spin_lock_init(&mvm->time_event_lock);
+
+ INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
+ INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
+ INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
+
+ SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
+
+ /*
+ * Populate the state variables that the transport layer needs
+ * to know about.
+ */
+ trans_cfg.op_mode = op_mode;
+ trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
+ trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+ trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+
+ /* TODO: this should really be a TLV */
+ if (cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ trans_cfg.bc_table_dword = true;
+
+ if (!iwlwifi_mod_params.wd_disable)
+ trans_cfg.queue_watchdog_timeout = cfg->base_params->wd_timeout;
+ else
+ trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
+
+ trans_cfg.command_names = iwl_mvm_cmd_strings;
+
+ trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+ trans_cfg.cmd_fifo = IWL_MVM_CMD_FIFO;
+
+ snprintf(mvm->hw->wiphy->fw_version,
+ sizeof(mvm->hw->wiphy->fw_version),
+ "%s", fw->fw_version);
+
+ /* Configure transport layer */
+ iwl_trans_configure(mvm->trans, &trans_cfg);
+
+ trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+
+ /* set up notification wait support */
+ iwl_notification_wait_init(&mvm->notif_wait);
+
+ /* Init phy db */
+ mvm->phy_db = iwl_phy_db_init(trans);
+ if (!mvm->phy_db) {
+ IWL_ERR(mvm, "Cannot init phy_db\n");
+ goto out_free;
+ }
+
+ IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
+ mvm->cfg->name, mvm->trans->hw_rev);
+
+ err = iwl_trans_start_hw(mvm->trans);
+ if (err)
+ goto out_free;
+
+ mutex_lock(&mvm->mutex);
+ err = iwl_run_init_mvm_ucode(mvm, true);
+ mutex_unlock(&mvm->mutex);
+ if (err && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+ goto out_free;
+ }
+
+ /* Stop the hw after the ALIVE and the NVM have been read */
+ if (!iwlmvm_mod_params.init_dbg)
+ iwl_trans_stop_hw(mvm->trans, false);
+
+ scan_size = sizeof(struct iwl_scan_cmd) +
+ mvm->fw->ucode_capa.max_probe_length +
+ (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel));
+ mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
+ if (!mvm->scan_cmd)
+ goto out_free;
+
+ err = iwl_mvm_mac_setup_register(mvm);
+ if (err)
+ goto out_free;
+
+ err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
+ if (err)
+ goto out_unregister;
+
+ return op_mode;
+
+ out_unregister:
+ ieee80211_unregister_hw(mvm->hw);
+ out_free:
+ iwl_phy_db_free(mvm->phy_db);
+ kfree(mvm->scan_cmd);
+ kfree(mvm->eeprom_blob);
+ iwl_trans_stop_hw(trans, true);
+ ieee80211_free_hw(mvm->hw);
+ return NULL;
+}
+
+static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ int i;
+
+ iwl_mvm_leds_exit(mvm);
+
+ ieee80211_unregister_hw(mvm->hw);
+
+ kfree(mvm->scan_cmd);
+
+ iwl_trans_stop_hw(mvm->trans, true);
+
+ iwl_phy_db_free(mvm->phy_db);
+ mvm->phy_db = NULL;
+
+ kfree(mvm->eeprom_blob);
+ iwl_free_nvm_data(mvm->nvm_data);
+ for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
+ kfree(mvm->nvm_sections[i].data);
+
+ ieee80211_free_hw(mvm->hw);
+}
+
+struct iwl_async_handler_entry {
+ struct list_head list;
+ struct iwl_rx_cmd_buffer rxb;
+ int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+};
+
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+
+ spin_lock_bh(&mvm->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&mvm->async_handlers_lock);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wk);
+ struct iwl_async_handler_entry *entry, *tmp;
+ struct list_head local_list;
+
+ INIT_LIST_HEAD(&local_list);
+
+ /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
+ mutex_lock(&mvm->mutex);
+
+ /*
+ * Sync with Rx path with a lock. Remove all the entries from this list,
+ * add them to a local one (lock free), and then handle them.
+ */
+ spin_lock_bh(&mvm->async_handlers_lock);
+ list_splice_init(&mvm->async_handlers_list, &local_list);
+ spin_unlock_bh(&mvm->async_handlers_lock);
+
+ list_for_each_entry_safe(entry, tmp, &local_list, list) {
+ if (entry->fn(mvm, &entry->rxb, NULL))
+ IWL_WARN(mvm,
+ "returned value from ASYNC handlers are ignored\n");
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u8 i;
+
+ /*
+ * Do the notification wait before RX handlers so
+ * even if the RX handler consumes the RXB we have
+ * access to it in the notification wait entry.
+ */
+ iwl_notification_wait_notify(&mvm->notif_wait, pkt);
+
+ for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
+ const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
+ struct iwl_async_handler_entry *entry;
+
+ if (rx_h->cmd_id != pkt->hdr.cmd)
+ continue;
+
+ if (!rx_h->async)
+ return rx_h->fn(mvm, rxb, cmd);
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ /* we can't do much... */
+ if (!entry)
+ return 0;
+
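+ /*
+ * Steal the page from the Rx buffer so it isn't freed when the Rx
+ * path returns; the async worker releases it with iwl_free_rxb()
+ * once the handler has run.
+ */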
+ entry->rxb._page = rxb_steal_page(rxb);
+ entry->rxb._offset = rxb->_offset;
+ entry->rxb._rx_page_order = rxb->_rx_page_order;
+ entry->fn = rx_h->fn;
+ spin_lock(&mvm->async_handlers_lock);
+ list_add_tail(&entry->list, &mvm->async_handlers_list);
+ spin_unlock(&mvm->async_handlers_lock);
+ schedule_work(&mvm->async_handlers_wk);
+ break;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ int mq = mvm->queue_to_mac80211[queue];
+
+ if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+ return;
+
+ if (atomic_inc_return(&mvm->queue_stop_count[mq]) > 1) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "queue %d (mac80211 %d) already stopped\n",
+ queue, mq);
+ return;
+ }
+
+ set_bit(mq, &mvm->transport_queue_stop);
+ ieee80211_stop_queue(mvm->hw, mq);
+}
+
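+/*
+ * Note: several transport queues can map to the same mac80211 queue; the
+ * atomic stop count above makes sure the mac80211 queue is stopped on the
+ * first user and, in iwl_mvm_wake_sw_queue() below, woken again only when
+ * the last user releases it.
+ */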
+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ int mq = mvm->queue_to_mac80211[queue];
+
+ if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+ return;
+
+ if (atomic_dec_return(&mvm->queue_stop_count[mq]) > 0) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "queue %d (mac80211 %d) already awake\n",
+ queue, mq);
+ return;
+ }
+
+ clear_bit(mq, &mvm->transport_queue_stop);
+
+ ieee80211_wake_queue(mvm->hw, mq);
+}
+
+static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ if (state)
+ set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ else
+ clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+}
+
+static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_tx_info *info;
+
+ info = IEEE80211_SKB_CB(skb);
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ ieee80211_free_txskb(mvm->hw, skb);
+}
+
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_mvm_dump_nic_error_log(mvm);
+
+ iwl_abort_notification_waits(&mvm->notif_wait);
+
+ /*
+ * If we're restarting already, don't cycle restarts.
+ * If INIT fw asserted, it will likely fail again.
+ * If WoWLAN fw asserted, don't restart either, mac80211
+ * can't recover this since we're already half suspended.
+ */
+ if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n");
+ } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
+ iwlwifi_mod_params.restart_fw) {
+ /*
+ * This is a bit racy, but worst case we tell mac80211 about
+ * a stopped/aborted (sched) scan when that was already done
+ * which is not a problem. It is necessary to abort any scan
+ * here because mac80211 requires having the scan cleared
+ * before restarting.
+ * We'll reset the scan_status to NONE in restart cleanup in
+ * the next start() call from mac80211.
+ */
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_NONE:
+ break;
+ case IWL_MVM_SCAN_OS:
+ ieee80211_scan_completed(mvm->hw, true);
+ break;
+ }
+
+ ieee80211_restart_hw(mvm->hw);
+ }
+}
+
+static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+ WARN_ON(1);
+}
+
+static const struct iwl_op_mode_ops iwl_mvm_ops = {
+ .start = iwl_op_mode_mvm_start,
+ .stop = iwl_op_mode_mvm_stop,
+ .rx = iwl_mvm_rx_dispatch,
+ .queue_full = iwl_mvm_stop_sw_queue,
+ .queue_not_full = iwl_mvm_wake_sw_queue,
+ .hw_rf_kill = iwl_mvm_set_hw_rfkill_state,
+ .free_skb = iwl_mvm_free_skb,
+ .nic_error = iwl_mvm_nic_error,
+ .cmd_queue_full = iwl_mvm_cmd_queue_full,
+ .nic_config = iwl_mvm_nic_config,
+};
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
new file mode 100644
index 00000000000..b428448f8dd
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -0,0 +1,292 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+/* Maps the driver specific channel width definition to the fw values */
+static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return PHY_VHT_CHANNEL_MODE20;
+ case NL80211_CHAN_WIDTH_40:
+ return PHY_VHT_CHANNEL_MODE40;
+ case NL80211_CHAN_WIDTH_80:
+ return PHY_VHT_CHANNEL_MODE80;
+ case NL80211_CHAN_WIDTH_160:
+ return PHY_VHT_CHANNEL_MODE160;
+ default:
+ WARN(1, "Invalid channel width=%u", chandef->width);
+ return PHY_VHT_CHANNEL_MODE20;
+ }
+}
+
+/*
+ * Maps the driver specific control channel position (relative to the center
+ * freq) definitions to the fw values
+ */
+static inline u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->chan->center_freq - chandef->center_freq1) {
+ case -70:
+ return PHY_VHT_CTRL_POS_4_BELOW;
+ case -50:
+ return PHY_VHT_CTRL_POS_3_BELOW;
+ case -30:
+ return PHY_VHT_CTRL_POS_2_BELOW;
+ case -10:
+ return PHY_VHT_CTRL_POS_1_BELOW;
+ case 10:
+ return PHY_VHT_CTRL_POS_1_ABOVE;
+ case 30:
+ return PHY_VHT_CTRL_POS_2_ABOVE;
+ case 50:
+ return PHY_VHT_CTRL_POS_3_ABOVE;
+ case 70:
+ return PHY_VHT_CTRL_POS_4_ABOVE;
+ default:
+ WARN(1, "Invalid channel definition");
+ case 0:
+ /*
+ * The FW is expected to check the control channel position only
+ * when in HT/VHT and the channel width is not 20MHz. Return
+ * this value as the default one.
+ */
+ return PHY_VHT_CTRL_POS_1_BELOW;
+ }
+}
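+/*
+ * Worked example: for a 40 MHz chandef with the control channel on
+ * channel 36 (5180 MHz) and center_freq1 at 5190 MHz, the difference is
+ * -10, so iwl_mvm_get_ctrl_pos() returns PHY_VHT_CTRL_POS_1_BELOW.
+ */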
+
+/*
+ * Construct the generic fields of the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
+ struct iwl_phy_context_cmd *cmd,
+ u32 action, u32 apply_time)
+{
+ memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
+ ctxt->color));
+ cmd->action = cpu_to_le32(action);
+ cmd->apply_time = cpu_to_le32(apply_time);
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+ struct iwl_phy_context_cmd *cmd,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ u8 valid_rx_chains, active_cnt, idle_cnt;
+
+ /* Set the channel info data */
+ cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+ PHY_BAND_24 : PHY_BAND_5);
+
+ cmd->ci.channel = chandef->chan->hw_value;
+ cmd->ci.width = iwl_mvm_get_channel_width(chandef);
+ cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+
+ /* Set the Rx chains */
+
+ /* TODO:
+ * Need to add on chain noise calibration limitations, and
+ * BT coex considerations.
+ */
+ valid_rx_chains = mvm->nvm_data->valid_rx_ant;
+ idle_cnt = chains_static;
+ active_cnt = chains_dynamic;
+
+ cmd->rxchain_info = cpu_to_le32(valid_rx_chains <<
+ PHY_RX_CHAIN_VALID_POS);
+ cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+ cmd->rxchain_info |= cpu_to_le32(active_cnt <<
+ PHY_RX_CHAIN_MIMO_CNT_POS);
+
+ cmd->txchain_info = cpu_to_le32(mvm->nvm_data->valid_tx_ant);
+}
+
+/*
+ * Send a command to apply the current phy configuration. The command is sent
+ * only if something in the configuration changed: either this is the first
+ * time the phy configuration is applied, or the phy configuration changed
+ * since the previous apply.
+ */
+static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic,
+ u32 action, u32 apply_time)
+{
+ struct iwl_phy_context_cmd cmd;
+ int ret;
+
+ /* Set the command header fields */
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
+
+ /* Set the command data */
+ iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+ chains_static, chains_dynamic);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
+ sizeof(struct iwl_phy_context_cmd),
+ &cmd);
+ if (ret)
+ IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
+ return ret;
+}
+
+
+struct phy_ctx_used_data {
+ unsigned long used[BITS_TO_LONGS(NUM_PHY_CTX)];
+};
+
+static void iwl_mvm_phy_ctx_used_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ void *_data)
+{
+ struct phy_ctx_used_data *data = _data;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+
+ __set_bit(phy_ctxt->id, data->used);
+}
+
+/*
+ * Send a command to add a PHY context based on the current HW configuration.
+ */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ struct phy_ctx_used_data data = {
+ .used = { },
+ };
+
+ /*
+ * If this is a regular PHY context (not the ROC one)
+ * skip the ROC PHY context's ID.
+ */
+ if (ctxt != &mvm->phy_ctxt_roc)
+ __set_bit(mvm->phy_ctxt_roc.id, data.used);
+
+ lockdep_assert_held(&mvm->mutex);
+ ctxt->color++;
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ ieee80211_iter_chan_contexts_atomic(
+ mvm->hw, iwl_mvm_phy_ctx_used_iter, &data);
+
+ ctxt->id = find_first_zero_bit(data.used, NUM_PHY_CTX);
+ if (WARN_ONCE(ctxt->id == NUM_PHY_CTX,
+ "Failed to init PHY context - no free ID!\n"))
+ return -EIO;
+ }
+
+ ctxt->channel = chandef->chan;
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_ADD, 0);
+}
+
+/*
+ * Send a command to modify the PHY context based on the current HW
+ * configuration. Note that the function does not check that the configuration
+ * changed.
+ */
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ ctxt->channel = chandef->chan;
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_MODIFY, 0);
+}
+
+/*
+ * Send a command to the FW to remove the given phy context.
+ * Once the command is sent, regardless of success or failure, the context is
+ * marked as invalid
+ */
+void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+ struct iwl_phy_context_cmd cmd;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, FW_CTXT_ACTION_REMOVE, 0);
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
+ sizeof(struct iwl_phy_context_cmd),
+ &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send PHY remove: ctxt id=%d\n",
+ ctxt->id);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
new file mode 100644
index 00000000000..5a92a497879
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -0,0 +1,207 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "mvm.h"
+#include "iwl-modparams.h"
+#include "fw-api-power.h"
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+
+static void iwl_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_powertable_cmd *cmd)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_channel *chan;
+ int dtimper, dtimper_msec;
+ int keep_alive;
+ bool radar_detect = false;
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd->action = cpu_to_le32(FW_CTXT_ACTION_MODIFY);
+
+ if ((!vif->bss_conf.ps) ||
+ (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM))
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+ dtimper = hw->conf.ps_dtim_period ?: 1;
+
+ /* Check if radar detection is required on current channel */
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ WARN_ON(!chanctx_conf);
+ if (chanctx_conf) {
+ chan = chanctx_conf->def.chan;
+ radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
+ }
+ rcu_read_unlock();
+
+ /* Check skip over DTIM conditions */
+ if (!radar_detect && (dtimper <= 10) &&
+ (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SLEEP_OVER_DTIM_MSK);
+ cmd->num_skip_dtim = 2;
+ }
+
+ /* Check that keep alive period is at least 3 * DTIM */
+ dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+ keep_alive = max_t(int, 3 * dtimper_msec,
+ MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);
+ keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
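+ /*
+ * Illustration: with a DTIM period of 3 and a beacon interval of 100,
+ * the 3 * DTIM bound computed above is ~900 ms, well below the 25 s
+ * floor, so keep_alive ends up as 25 seconds.
+ */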
+
+ cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP) {
+ /* TODO: Also for D3 (device sleep / WoWLAN) */
+ cmd->rx_data_timeout = cpu_to_le32(10);
+ cmd->tx_data_timeout = cpu_to_le32(10);
+ } else {
+ cmd->rx_data_timeout = cpu_to_le32(50);
+ cmd->tx_data_timeout = cpu_to_le32(50);
+ }
+}
+
+int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_powertable_cmd cmd = {};
+
+ if (!iwlwifi_mod_params.power_save) {
+ IWL_DEBUG_POWER(mvm, "Power management is not allowed\n");
+ return 0;
+ }
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ iwl_power_build_cmd(mvm, vif, &cmd);
+
+ IWL_DEBUG_POWER(mvm,
+ "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+ cmd.id_and_color, iwlmvm_mod_params.power_scheme,
+ le16_to_cpu(cmd.flags));
+
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+ IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
+ le16_to_cpu(cmd.keep_alive_seconds));
+ IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+ le32_to_cpu(cmd.tx_data_timeout));
+ IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd.rx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+ le32_to_cpu(cmd.tx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+ cmd.lprx_rssi_threshold);
+ IWL_DEBUG_POWER(mvm, "DTIMs to skip = %u\n", cmd.num_skip_dtim);
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
+
+int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_powertable_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!iwlwifi_mod_params.power_save) {
+ IWL_DEBUG_POWER(mvm, "Power management is not allowed\n");
+ return 0;
+ }
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd.action = cpu_to_le32(FW_CTXT_ACTION_MODIFY);
+
+ IWL_DEBUG_POWER(mvm,
+ "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+ cmd.id_and_color, iwlmvm_mod_params.power_scheme,
+ le16_to_cpu(cmd.flags));
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
+ sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_power_get_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_powertable_cmd *cmd)
+{
+ iwl_power_build_cmd(mvm, vif, cmd);
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
new file mode 100644
index 00000000000..92562846814
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -0,0 +1,197 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+struct iwl_mvm_quota_iterator_data {
+ int n_interfaces[MAX_BINDINGS];
+ int colors[MAX_BINDINGS];
+ struct ieee80211_vif *new_vif;
+};
+
+static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_quota_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 id;
+
+ /*
+ * We'll account for the new interface (if any) below,
+ * skip it here in case we're not called from within
+ * the add_interface callback (otherwise it won't show
+ * up in iteration)
+ */
+ if (vif == data->new_vif)
+ return;
+
+ if (!mvmvif->phy_ctxt)
+ return;
+
+ /* currently, PHY ID == binding ID */
+ id = mvmvif->phy_ctxt->id;
+
+ /* need at least one binding per PHY */
+ BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
+
+ if (WARN_ON_ONCE(id >= MAX_BINDINGS))
+ return;
+
+ if (data->colors[id] < 0)
+ data->colors[id] = mvmvif->phy_ctxt->color;
+ else
+ WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->bss_conf.assoc)
+ data->n_interfaces[id]++;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (mvmvif->ap_active)
+ data->n_interfaces[id]++;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ data->n_interfaces[id]++;
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (vif->bss_conf.ibss_joined)
+ data->n_interfaces[id]++;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
+{
+ struct iwl_time_quota_cmd cmd;
+ int i, idx, ret, num_active_bindings, quota, quota_rem;
+ struct iwl_mvm_quota_iterator_data data = {
+ .n_interfaces = {},
+ .colors = { -1, -1, -1, -1 },
+ .new_vif = newvif,
+ };
+
+ /* update all upon completion */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
+ BUILD_BUG_ON(data.colors[MAX_BINDINGS - 1] != -1);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_quota_iterator, &data);
+ if (newvif) {
+ data.new_vif = NULL;
+ iwl_mvm_quota_iterator(&data, newvif->addr, newvif);
+ }
+
+ /*
+ * The FW's scheduling session consists of
+ * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
+ * equally between all the bindings that require quota
+ */
+ num_active_bindings = 0;
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ if (data.n_interfaces[i] > 0)
+ num_active_bindings++;
+ }
+
+ if (!num_active_bindings)
+ goto send_cmd;
+
+ quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
+ quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
+
+ for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
+ if (data.n_interfaces[i] <= 0)
+ continue;
+
+ cmd.quotas[idx].id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
+ cmd.quotas[idx].quota = cpu_to_le32(quota);
+ cmd.quotas[idx].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ idx++;
+ }
+
+ /* Give the remainder of the session to the first binding */
+ le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
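+ /*
+ * Illustration: with three active bindings each entry above gets
+ * IWL_MVM_MAX_QUOTA / 3 fragments, and the division remainder (at most
+ * two fragments in that case) is folded into the first binding here.
+ */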
+
+send_cmd:
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
new file mode 100644
index 00000000000..56b636d9ab3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -0,0 +1,3080 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+#define RS_NAME "iwl-mvm-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_NUMBER_TRY 1
+#define IWL_HT_NUMBER_TRY 3
+
+#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
+#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IWL_MISSED_RATE_MAX 15
+/* max time to accum history 2 seconds */
+#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+ IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
+ IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
+ IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
+ IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+};
+
+static const u8 ant_toggle_lookup[] = {
+ /*ANT_NONE -> */ ANT_NONE,
+ /*ANT_A -> */ ANT_B,
+ /*ANT_B -> */ ANT_C,
+ /*ANT_AB -> */ ANT_BC,
+ /*ANT_C -> */ ANT_A,
+ /*ANT_AC -> */ ANT_AB,
+ /*ANT_BC -> */ ANT_AC,
+ /*ANT_ABC -> */ ANT_ABC,
+};
+
+#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
+ IWL_RATE_SISO_##s##M_PLCP, \
+ IWL_RATE_MIMO2_##s##M_PLCP,\
+ IWL_RATE_MIMO3_##s##M_PLCP,\
+ IWL_RATE_##r##M_IEEE, \
+ IWL_RATE_##ip##M_INDEX, \
+ IWL_RATE_##in##M_INDEX, \
+ IWL_RATE_##rp##M_INDEX, \
+ IWL_RATE_##rn##M_INDEX, \
+ IWL_RATE_##pp##M_INDEX, \
+ IWL_RATE_##np##M_INDEX }
+
+/*
+ * Parameter order:
+ * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to IWL_RATE_INVALID
+ *
+ */
+static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
+ IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
+ IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+ IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+ /* FIXME:RS: ^^ should be INV (legacy) */
+};
+
+static inline u8 rs_extract_rate(u32 rate_n_flags)
+{
+ /* also works for HT because bits 7:6 are zero there */
+ return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK);
+}
+
+static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+ int idx = 0;
+
+ /* HT rate format */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = rs_extract_rate(rate_n_flags);
+
+ if (idx >= IWL_RATE_MIMO3_6M_PLCP)
+ idx = idx - IWL_RATE_MIMO3_6M_PLCP;
+ else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+ idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+
+ idx += IWL_FIRST_OFDM_RATE;
+ /* skip 9M, not supported in HT */
+ if (idx >= IWL_RATE_9M_INDEX)
+ idx += 1;
+ if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+ return idx;
+
+ /* legacy rate format, search for match in table */
+ } else {
+ for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
+ if (iwl_rates[idx].plcp ==
+ rs_extract_rate(rate_n_flags))
+ return idx;
+ }
+
+ return -1;
+}
+
+static void rs_rate_scale_perform(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta);
+static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
+
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index);
+#else
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index)
+{}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in the legacy table and will only be used in the
+ * G (2.4 GHz) band.
+ */
+
+static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
+ {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+ {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
+ {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
+ {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
+ {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+ {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
+ {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
+ {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
+ {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
+ {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
+ {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
+ {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
+ {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+ { "1", "BPSK DSSS"},
+ { "2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ { "11", "QPSK CCK"},
+ { "6", "BPSK 1/2"},
+ { "9", "BPSK 1/2"},
+ { "12", "QPSK 1/2"},
+ { "18", "QPSK 3/4"},
+ { "24", "16QAM 1/2"},
+ { "36", "16QAM 3/4"},
+ { "48", "64QAM 2/3"},
+ { "54", "64QAM 3/4"},
+ { "60", "64QAM 5/6"},
+};
+
+#define MCS_INDEX_PER_STREAM (8)
+
+static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
+{
+ window->data = 0;
+ window->success_counter = 0;
+ window->success_ratio = IWL_INVALID_VALUE;
+ window->counter = 0;
+ window->average_tpt = IWL_INVALID_VALUE;
+ window->stamp = 0;
+}
+
+static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+ return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ * Removes the old data from the statistics. All data that is older than
+ * TID_MAX_TIME_DIFF will be deleted.
+ */
+static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
+{
+ /* The oldest age we want to keep */
+ u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+ while (tl->queue_count &&
+ (tl->time_stamp < oldest_time)) {
+ tl->total -= tl->packet_count[tl->head];
+ tl->packet_count[tl->head] = 0;
+ tl->time_stamp += TID_QUEUE_CELL_SPACING;
+ tl->queue_count--;
+ tl->head++;
+ if (tl->head >= TID_QUEUE_MAX_SIZE)
+ tl->head = 0;
+ }
+}
+
+/*
+ * Increment the traffic load value for this tid and also remove
+ * any old values that have passed a certain time period.
+ */
+static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
+ struct ieee80211_hdr *hdr)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 index;
+ struct iwl_traffic_load *tl = NULL;
+ u8 tid;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ } else {
+ return IWL_MAX_TID_COUNT;
+ }
+
+ if (unlikely(tid >= IWL_MAX_TID_COUNT))
+ return IWL_MAX_TID_COUNT;
+
+ tl = &lq_data->load[tid];
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ /* Happens only for the first packet. Initialize the data */
+ if (!(tl->queue_count)) {
+ tl->total = 1;
+ tl->time_stamp = curr_time;
+ tl->queue_count = 1;
+ tl->head = 0;
+ tl->packet_count[0] = 1;
+ return IWL_MAX_TID_COUNT;
+ }
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ index = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data that is older than */
+ /* TID_MAX_TIME_DIFF */
+ if (index >= TID_QUEUE_MAX_SIZE)
+ rs_tl_rm_old_stats(tl, curr_time);
+
+ index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
+ tl->packet_count[index] = tl->packet_count[index] + 1;
+ tl->total = tl->total + 1;
+
+ if ((index + 1) > tl->queue_count)
+ tl->queue_count = index + 1;
+
+ return tid;
+}
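+/*
+ * In other words: packets are counted in TID_QUEUE_CELL_SPACING-ms wide
+ * buckets of a circular array of TID_QUEUE_MAX_SIZE entries; a packet that
+ * arrives time_diff ms after tl->time_stamp lands in bucket
+ * (head + time_diff / TID_QUEUE_CELL_SPACING) % TID_QUEUE_MAX_SIZE, and
+ * buckets older than TID_MAX_TIME_DIFF are dropped by rs_tl_rm_old_stats().
+ */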
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+/**
+ * Program the device to use a fixed rate for frame transmission.
+ * This is for debugging/testing only:
+ * once the device starts using a fixed rate, the module needs to be
+ * reloaded to bring back normal operation.
+ */
+static void rs_program_fix_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta)
+{
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+
+ IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
+ lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+ if (lq_sta->dbg_fixed_rate) {
+ rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
+ }
+}
+#endif
+
+/*
+ * get the traffic load value for tid
+ */
+static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 index;
+ struct iwl_traffic_load *tl = NULL;
+
+ if (tid >= IWL_MAX_TID_COUNT)
+ return 0;
+
+ tl = &(lq_data->load[tid]);
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ if (!(tl->queue_count))
+ return 0;
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ index = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data that is older than */
+ /* TID_MAX_TIME_DIFF */
+ if (index >= TID_QUEUE_MAX_SIZE)
+ rs_tl_rm_old_stats(tl, curr_time);
+
+ return tl->total;
+}
+
+static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_data, u8 tid,
+ struct ieee80211_sta *sta)
+{
+ int ret = -EAGAIN;
+ u32 load;
+
+ load = rs_tl_get_load(lq_data, tid);
+
+ if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
+ IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
+ sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+ if (ret == -EAGAIN) {
+ /*
+ * driver and mac80211 are out of sync:
+ * this might be caused by reloading the firmware,
+ * so stop the tx ba session here
+ */
+ IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
+ tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
+ }
+ } else {
+ IWL_DEBUG_HT(mvm,
+ "Aggregation not enabled for tid %d because load = %u\n",
+ tid, load);
+ }
+ return ret;
+}
+
+static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid,
+ struct iwl_lq_sta *lq_data,
+ struct ieee80211_sta *sta)
+{
+ if (tid < IWL_MAX_TID_COUNT)
+ rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta);
+ else
+ IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n",
+ tid, IWL_MAX_TID_COUNT);
+}
+
+static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
+{
+ return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput from an iwl_scale_tbl_info; this wraps a
+ * NULL pointer check on the expected throughput table.
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_index];
+ return 0;
+}
+
+/**
+ * rs_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate. window->data contains the bitmask of successful
+ * packets.
+ */
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes)
+{
+ struct iwl_rate_scale_data *window = NULL;
+ static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
+ s32 fail_count, tpt;
+
+ if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+ return -EINVAL;
+
+ /* Select window for current tx bit rate */
+ window = &(tbl->win[scale_index]);
+
+ /* Get expected throughput */
+ tpt = get_expected_tpt(tbl, scale_index);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history window; anything older isn't really relevant any more.
+ * If we have filled up the sliding window, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (attempts > 0) {
+ if (window->counter >= IWL_RATE_MAX_WINDOW) {
+ /* remove earliest */
+ window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+ if (window->data & mask) {
+ window->data &= ~mask;
+ window->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ window->counter++;
+
+ /* Shift bitmap by one frame to throw away oldest history */
+ window->data <<= 1;
+
+ /* Mark the most recent #successes attempts as successful */
+ if (successes > 0) {
+ window->success_counter++;
+ window->data |= 0x1;
+ successes--;
+ }
+
+ attempts--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (window->counter > 0)
+ window->success_ratio = 128 * (100 * window->success_counter)
+ / window->counter;
+ else
+ window->success_ratio = IWL_INVALID_VALUE;
+
+ fail_count = window->counter - window->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
+ (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
+ window->average_tpt = (window->success_ratio * tpt + 64) / 128;
+ else
+ window->average_tpt = IWL_INVALID_VALUE;
+
+ /* Tag this window as having been updated */
+ window->stamp = jiffies;
+
+ return 0;
+}
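+/*
+ * Illustration: success_ratio is kept in units of 1/128 of a percent, so
+ * e.g. 31 successes out of 62 attempts gives 128 * (100 * 31) / 62 = 6400
+ * (i.e. 50%), and average_tpt is then (6400 * tpt + 64) / 128.
+ */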
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+/* FIXME:RS:remove this function and put the flags statically in the table */
+static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl,
+ int index, u8 use_green)
+{
+ u32 rate_n_flags = 0;
+
+ if (is_legacy(tbl->lq_type)) {
+ rate_n_flags = iwl_rates[index].plcp;
+ if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
+ rate_n_flags |= RATE_MCS_CCK_MSK;
+ } else if (is_Ht(tbl->lq_type)) {
+ if (index > IWL_LAST_OFDM_RATE) {
+ IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
+ index = IWL_LAST_OFDM_RATE;
+ }
+ rate_n_flags = RATE_MCS_HT_MSK;
+
+ if (is_siso(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_siso;
+ else if (is_mimo2(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_mimo2;
+ else
+ rate_n_flags |= iwl_rates[index].plcp_mimo3;
+ } else {
+ IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+ }
+
+ rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+ RATE_MCS_ANT_ABC_MSK);
+
+ if (is_Ht(tbl->lq_type)) {
+ if (tbl->is_ht40)
+ rate_n_flags |= RATE_MCS_CHAN_WIDTH_40;
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
+ if (use_green) {
+ rate_n_flags |= RATE_HT_MCS_GF_MSK;
+ if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+ rate_n_flags &= ~RATE_MCS_SGI_MSK;
+ IWL_ERR(mvm, "GF was set with SGI:SISO\n");
+ }
+ }
+ }
+ return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct iwl_scale_tbl_info *tbl,
+ int *rate_idx)
+{
+ u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+ u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
+ u8 mcs;
+
+ memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+ *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+
+ if (*rate_idx == IWL_RATE_INVALID) {
+ *rate_idx = -1;
+ return -EINVAL;
+ }
+ tbl->is_SGI = 0; /* default legacy setup */
+ tbl->is_ht40 = 0;
+ tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+ tbl->lq_type = LQ_NONE;
+ tbl->max_search = IWL_MAX_SEARCH;
+
+ /* legacy rate format */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ if (num_of_ant == 1) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+ }
+ /* HT rate format */
+ } else {
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ if (rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
+ tbl->is_ht40 = 1;
+
+ mcs = rs_extract_rate(rate_n_flags);
+
+ /* SISO */
+ if (mcs <= IWL_RATE_SISO_60M_PLCP) {
+ if (num_of_ant == 1)
+ tbl->lq_type = LQ_SISO; /*else NONE*/
+ /* MIMO2 */
+ } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
+ if (num_of_ant == 2)
+ tbl->lq_type = LQ_MIMO2;
+ /* MIMO3 */
+ } else {
+ if (num_of_ant == 3) {
+ tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
+ tbl->lq_type = LQ_MIMO3;
+ }
+ }
+ }
+ return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+ struct iwl_scale_tbl_info *tbl)
+{
+ u8 new_ant_type;
+
+ if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ return 0;
+
+ if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
+ return 0;
+
+ new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+ while ((new_ant_type != tbl->ant_type) &&
+ !rs_is_valid_ant(valid_ant, new_ant_type))
+ new_ant_type = ant_toggle_lookup[new_ant_type];
+
+ if (new_ant_type == tbl->ant_type)
+ return 0;
+
+ tbl->ant_type = new_ant_type;
+ *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+ *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool rs_use_green(struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
+
+ bool use_green = !(sta_priv->vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+ return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && use_green;
+}
+
+/**
+ * rs_get_supported_rates - get the available rates
+ *
+ * For management or broadcast frames, only the basic
+ * available rates are returned.
+ *
+ */
+static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+ struct ieee80211_hdr *hdr,
+ enum iwl_table_type rate_type)
+{
+ if (is_legacy(rate_type)) {
+ return lq_sta->active_legacy_rate;
+ } else {
+ if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else if (is_mimo2(rate_type))
+ return lq_sta->active_mimo2_rate;
+ else
+ return lq_sta->active_mimo3_rate;
+ }
+}
+
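+/*
+ * Find the next lower and next higher supported rates around the given index.
+ * The two indices are packed into one u16 as (high << 8) | low, e.g.
+ * low = 4 and high = 7 come back as 0x0704; callers unpack them with
+ * "low = v & 0xff; high = (v >> 8) & 0xff". IWL_RATE_INVALID in either
+ * byte means there is no adjacent supported rate in that direction.
+ */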
+static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
+ int rate_type)
+{
+ u8 high = IWL_RATE_INVALID;
+ u8 low = IWL_RATE_INVALID;
+
+	/* 802.11a or HT rates walk to the next literal adjacent rate
+	 * in the rate table */
+ if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = index - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = index + 1;
+ for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = index;
+ while (low != IWL_RATE_INVALID) {
+ low = iwl_rates[low].prev_rs;
+ if (low == IWL_RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ IWL_DEBUG_RATE(mvm, "Skipping masked lower rate: %d\n", low);
+ }
+
+ high = index;
+ while (high != IWL_RATE_INVALID) {
+ high = iwl_rates[high].next_rs;
+ if (high == IWL_RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ IWL_DEBUG_RATE(mvm, "Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
+
+static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ u8 scale_index, u8 ht_possible)
+{
+ s32 low;
+ u16 rate_mask;
+ u16 high_low;
+ u8 switch_to_legacy = 0;
+ u8 is_green = lq_sta->is_green;
+ struct iwl_mvm *mvm = lq_sta->drv;
+
+	/* Check if we need to switch from HT to legacy rates.
+	 * The assumption is that mandatory rates (1 Mbps or 6 Mbps)
+	 * are always supported (the spec demands it). */
+ if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
+ switch_to_legacy = 1;
+ scale_index = rs_ht_to_legacy[scale_index];
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+
+ if (num_of_ant(tbl->ant_type) > 1)
+ tbl->ant_type =
+ first_antenna(mvm->nvm_data->valid_tx_ant);
+
+ tbl->is_ht40 = 0;
+ tbl->is_SGI = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ }
+
+ rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+ /* Mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ /* supp_rates has no CCK bits in A mode */
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate_mask = (u16)(rate_mask &
+ (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+ else
+ rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
+ }
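+
+	/*
+	 * Note: on 5 GHz, supp_rates bit 0 is the first OFDM rate (6 Mbps),
+	 * while the driver's rate table keeps the CCK rates in the lowest
+	 * bits, hence the shift by IWL_FIRST_OFDM_RATE above to line the
+	 * two bitmaps up.
+	 */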
+
+ /* If we switched from HT to legacy, check current rate */
+ if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
+ low = scale_index;
+ goto out;
+ }
+
+ high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
+ tbl->lq_type);
+ low = high_low & 0xff;
+
+ if (low == IWL_RATE_INVALID)
+ low = scale_index;
+
+out:
+ return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool table_type_matches(struct iwl_scale_tbl_info *a,
+ struct iwl_scale_tbl_info *b)
+{
+ return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
+ (a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
+{
+ int legacy_success;
+ int retries;
+ int rs_index, mac_index, i;
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ struct iwl_lq_cmd *table;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mac80211_rate_control_flags mac_flags;
+ u32 tx_rate;
+ struct iwl_scale_tbl_info tbl_type;
+ struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+
+ IWL_DEBUG_RATE_LIMIT(mvm,
+ "get frame ack response, update rate scale window\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ /*
+ * Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ tx_rate = le32_to_cpu(table->rs_table[0]);
+ rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type, &rs_index);
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rs_index -= IWL_FIRST_OFDM_RATE;
+ mac_flags = info->status.rates[0].flags;
+ mac_index = info->status.rates[0].idx;
+ /* For HT packets, map MCS to PLCP */
+ if (mac_flags & IEEE80211_TX_RC_MCS) {
+ /* Remove # of streams */
+ mac_index &= RATE_HT_MCS_RATE_CODE_MSK;
+ if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+ mac_index++;
+		/*
+		 * mac80211 HT index is always zero-indexed; we need to move
+		 * HT OFDM rates after the CCK rates in the 2.4 GHz band
+		 */
+ if (info->band == IEEE80211_BAND_2GHZ)
+ mac_index += IWL_FIRST_OFDM_RATE;
+ }
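+	/*
+	 * Illustrative example (assuming the usual rate table layout where
+	 * 9 Mbps directly follows 6 Mbps): mac80211 reports HT MCS 2, the
+	 * increment above skips the 9 Mbps slot (which has no HT
+	 * equivalent), and on 2.4 GHz the result is shifted past the four
+	 * CCK rates so it lands on the driver index that carries MCS 2.
+	 */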
+ /* Here we actually compare this rate to the latest LQ command */
+ if ((mac_index < 0) ||
+ (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+ (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+ (tbl_type.ant_type != info->status.antenna) ||
+ (!!(tx_rate & RATE_MCS_HT_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+ (!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+ (rs_index != mac_index)) {
+ IWL_DEBUG_RATE(mvm,
+ "initial rate %d does not match %d (0x%x)\n",
+ mac_index, rs_index, tx_rate);
+		/*
+		 * Since the rates mismatch, the last LQ command may have
+		 * failed. After IWL_MISSED_RATE_MAX mismatches, resync the
+		 * uCode with the driver.
+		 */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ } else
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ /* Figure out if rate scale algorithm is in active or search table */
+ if (table_type_matches(&tbl_type,
+ &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ } else if (table_type_matches(
+ &tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "Neither active nor search matches tx rate\n");
+ tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ IWL_DEBUG_RATE(mvm, "active- lq:%x, ant:%x, SGI:%d\n",
+ tmp_tbl->lq_type, tmp_tbl->ant_type,
+ tmp_tbl->is_SGI);
+ tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ IWL_DEBUG_RATE(mvm, "search- lq:%x, ant:%x, SGI:%d\n",
+ tmp_tbl->lq_type, tmp_tbl->ant_type,
+ tmp_tbl->is_SGI);
+ IWL_DEBUG_RATE(mvm, "actual- lq:%x, ant:%x, SGI:%d\n",
+ tbl_type.lq_type, tbl_type.ant_type,
+ tbl_type.is_SGI);
+		/*
+		 * No matching table found; bypass the data collection and
+		 * continue with rate scaling to find a valid rate table.
+		 */
+ rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /*
+ * Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+ * For aggregation, all packets were transmitted at the same rate, the
+ * first index into rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ tx_rate = le32_to_cpu(table->rs_table[0]);
+ rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type,
+ &rs_index);
+ rs_collect_tx_data(curr_tbl, rs_index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed += (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+		/*
+		 * For legacy, update frame history for each Tx retry.
+		 */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ tx_rate = le32_to_cpu(table->rs_table[i]);
+ rs_get_tbl_info_from_mcs(tx_rate, info->band,
+ &tbl_type, &rs_index);
+ /*
+ * Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (table_type_matches(&tbl_type, curr_tbl))
+ tmp_tbl = curr_tbl;
+ else if (table_type_matches(&tbl_type, other_tbl))
+ tmp_tbl = other_tbl;
+ else
+ continue;
+ rs_collect_tx_data(tmp_tbl, rs_index, 1,
+ i < retries ? 0 : legacy_success);
+ }
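+		/*
+		 * Example: with retries = 2 and a final ACK, the first two
+		 * entries above are recorded as failed attempts and the last
+		 * one as the single successful attempt.
+		 */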
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+	/* The last TX rate is cached in lq_sta; it's set in the if/else above */
+ lq_sta->last_rate_n_flags = tx_rate;
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta && sta->supp_rates[sband->band])
+ rs_rate_scale_perform(mvm, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history statistics.
+ * These control how long we stay using the same modulation mode before
+ * searching for a new mode.
+ */
+static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
+ struct iwl_lq_sta *lq_sta)
+{
+ IWL_DEBUG_RATE(mvm, "we are staying in the same table\n");
+ lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ if (is_legacy) {
+ lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
+ } else {
+ lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
+ }
+ lq_sta->table_count = 0;
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = jiffies;
+ lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ /* Used to choose among HT tables */
+ s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+ /* Check for invalid LQ type */
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Legacy rates have only one table */
+ if (is_legacy(tbl->lq_type)) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Choose among many HT tables depending on number of streams
+ * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
+ * status */
+ if (is_siso(tbl->lq_type) && !tbl->is_ht40)
+ ht_tbl_pointer = expected_tpt_siso20MHz;
+ else if (is_siso(tbl->lq_type))
+ ht_tbl_pointer = expected_tpt_siso40MHz;
+ else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ else if (is_mimo2(tbl->lq_type))
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+ else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40)
+ ht_tbl_pointer = expected_tpt_mimo3_20MHz;
+ else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
+ ht_tbl_pointer = expected_tpt_mimo3_40MHz;
+
+ if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
+ tbl->expected_tpt = ht_tbl_pointer[0];
+ else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
+ tbl->expected_tpt = ht_tbl_pointer[1];
+ else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
+ tbl->expected_tpt = ht_tbl_pointer[2];
+ else /* AGG+SGI */
+ tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput. When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32 rs_get_best_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl, /* "search" */
+ u16 rate_mask, s8 index)
+{
+ /* "active" values */
+ struct iwl_scale_tbl_info *active_tbl =
+ &(lq_sta->lq_info[lq_sta->active_tbl]);
+ s32 active_sr = active_tbl->win[index].success_ratio;
+ s32 active_tpt = active_tbl->expected_tpt[index];
+
+ /* expected "search" throughput */
+ s32 *tpt_tbl = tbl->expected_tpt;
+
+ s32 new_rate, high, low, start_hi;
+ u16 high_low;
+ s8 rate = index;
+
+ new_rate = high = low = start_hi = IWL_RATE_INVALID;
+
+ while (1) {
+ high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
+ tbl->lq_type);
+
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /*
+ * Lower the "search" bit rate, to give new "search" mode
+ * approximately the same throughput as "active" if:
+ *
+ * 1) "Active" mode has been working modestly well (but not
+ * great), and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above the actual
+ * measured "active" throughput (but less than expected
+ * "active" throughput under perfect conditions).
+ * OR
+ * 2) "Active" mode has been working perfectly or very well
+ * and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above expected
+ * "active" throughput (under perfect conditions).
+ */
+ if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
+ ((active_sr > IWL_RATE_DECREASE_TH) &&
+ (active_sr <= IWL_RATE_HIGH_TH) &&
+ (tpt_tbl[rate] <= active_tpt))) ||
+ ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
+ (tpt_tbl[rate] > active_tpt))) {
+ /* (2nd or later pass)
+ * If we've already tried to raise the rate, and are
+ * now trying to lower it, use the higher rate. */
+ if (start_hi != IWL_RATE_INVALID) {
+ new_rate = start_hi;
+ break;
+ }
+
+ new_rate = rate;
+
+ /* Loop again with lower rate */
+ if (low != IWL_RATE_INVALID)
+ rate = low;
+
+ /* Lower rate not available, use the original */
+ else
+ break;
+
+ /* Else try to raise the "search" rate to match "active" */
+ } else {
+ /* (2nd or later pass)
+ * If we've already tried to lower the rate, and are
+ * now trying to raise it, use the lower rate. */
+ if (new_rate != IWL_RATE_INVALID)
+ break;
+
+ /* Loop again with higher rate */
+ else if (high != IWL_RATE_INVALID) {
+ start_hi = high;
+ rate = high;
+
+ /* Higher rate not available, use the original */
+ } else {
+ new_rate = rate;
+ break;
+ }
+ }
+ }
+
+ return new_rate;
+}
+
+static bool iwl_is_ht40_tx_allowed(struct ieee80211_sta *sta)
+{
+ return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+
+ if (!sta->ht_cap.ht_supported)
+ return -1;
+
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ return -1;
+
+ /* Need both Tx chains/antennas to support MIMO */
+ if (num_of_ant(mvm->nvm_data->valid_tx_ant) < 2)
+ return -1;
+
+ IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
+
+ tbl->lq_type = LQ_MIMO2;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ rate_mask = lq_sta->active_mimo2_rate;
+
+ if (iwl_is_ht40_tx_allowed(sta))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO2 best rate %d mask %X\n",
+ rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for MIMO3
+ */
+static int rs_switch_to_mimo3(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+
+ if (!sta->ht_cap.ht_supported)
+ return -1;
+
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ return -1;
+
+	/* Need all three Tx chains/antennas to support MIMO3 */
+ if (num_of_ant(mvm->nvm_data->valid_tx_ant) < 3)
+ return -1;
+
+ IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n");
+
+ tbl->lq_type = LQ_MIMO3;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
+ rate_mask = lq_sta->active_mimo3_rate;
+
+ if (iwl_is_ht40_tx_allowed(sta))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n",
+ rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int rs_switch_to_siso(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ u8 is_green = lq_sta->is_green;
+ s32 rate;
+
+ if (!sta->ht_cap.ht_supported)
+ return -1;
+
+ IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
+
+ tbl->lq_type = LQ_SISO;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ rate_mask = lq_sta->active_siso_rate;
+
+ if (iwl_is_ht40_tx_allowed(sta))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ if (is_green)
+		tbl->is_SGI = 0;	/* 11n spec: no SGI in SISO+Greenfield */
+
+ rs_set_expected_tpt_table(lq_sta, tbl);
+ rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(mvm, "LQ: get best rate %d mask %X\n", rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(mvm,
+ "can not switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int rs_move_legacy_other(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ int index)
+{
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
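+	/*
+	 * sz covers the table header only: the per-rate history windows
+	 * (the win[] array at the end of the struct, which is what the size
+	 * arithmetic above relies on) are deliberately left out of the
+	 * memcpy()s below, so the active table's history is never copied
+	 * into the search table.
+	 */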
+ u8 start_action;
+ u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+ u8 tx_chains_num = num_of_ant(valid_tx_ant);
+ int ret;
+ u8 update_search_tbl_counter = 0;
+
+ start_action = tbl->action;
+ while (1) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_LEGACY_SWITCH_ANTENNA1:
+ case IWL_LEGACY_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
+
+ if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ /* Don't change antenna if success has been great */
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ /* Set up search table to try other antenna */
+ memcpy(search_tbl, tbl, sz);
+
+ if (rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+ goto out;
+ }
+ break;
+ case IWL_LEGACY_SWITCH_SISO:
+ IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to SISO\n");
+
+ /* Set up search table to try SISO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ ret = rs_switch_to_siso(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+
+ break;
+ case IWL_LEGACY_SWITCH_MIMO2_AB:
+ case IWL_LEGACY_SWITCH_MIMO2_AC:
+ case IWL_LEGACY_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
+
+ /* Set up search table to try MIMO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
+
+ case IWL_LEGACY_SWITCH_MIMO3_ABC:
+ IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n");
+
+ /* Set up search table to try MIMO3 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ search_tbl->ant_type = ANT_ABC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int rs_move_siso_to_other(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta, int index)
+{
+ u8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+ u8 tx_chains_num = num_of_ant(valid_tx_ant);
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+ while (1) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_SISO_SWITCH_ANTENNA1:
+ case IWL_SISO_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
+ if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IWL_SISO_SWITCH_MIMO2_AB:
+ case IWL_SISO_SWITCH_MIMO2_AC:
+ case IWL_SISO_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+ break;
+ case IWL_SISO_SWITCH_GI:
+ if (!tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
+
+ memcpy(search_tbl, tbl, sz);
+ if (is_green) {
+ if (!tbl->is_SGI)
+ break;
+ else
+ IWL_ERR(mvm,
+ "SGI was set in GF+SISO\n");
+ }
+ search_tbl->is_SGI = !tbl->is_SGI;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ rate_n_flags_from_tbl(mvm, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ case IWL_SISO_SWITCH_MIMO3_ABC:
+ IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ search_tbl->ant_type = ANT_ABC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta, int index)
+{
+ s8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+ u8 tx_chains_num = num_of_ant(valid_tx_ant);
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+ while (1) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_MIMO2_SWITCH_ANTENNA1:
+ case IWL_MIMO2_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle Antennas\n");
+
+ if (tx_chains_num <= 2)
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IWL_MIMO2_SWITCH_SISO_A:
+ case IWL_MIMO2_SWITCH_SISO_B:
+ case IWL_MIMO2_SWITCH_SISO_C:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_siso(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IWL_MIMO2_SWITCH_GI:
+ if (!tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO2 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+ * modulation (dual stream with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ rate_n_flags_from_tbl(mvm, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+
+ case IWL_MIMO2_SWITCH_MIMO3_ABC:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ search_tbl->ant_type = ANT_ABC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+ tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO3
+ */
+static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta, int index)
+{
+ s8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+ u8 tx_chains_num = num_of_ant(valid_tx_ant);
+ int ret;
+ u8 update_search_tbl_counter = 0;
+
+ start_action = tbl->action;
+ while (1) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_MIMO3_SWITCH_ANTENNA1:
+ case IWL_MIMO3_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n");
+
+ if (tx_chains_num <= 3)
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate,
+ search_tbl))
+ goto out;
+ break;
+ case IWL_MIMO3_SWITCH_SISO_A:
+ case IWL_MIMO3_SWITCH_SISO_B:
+ case IWL_MIMO3_SWITCH_SISO_C:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_siso(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IWL_MIMO3_SWITCH_MIMO2_AB:
+ case IWL_MIMO3_SWITCH_MIMO2_AC:
+ case IWL_MIMO3_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n");
+
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IWL_MIMO3_SWITCH_GI:
+ if (!tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+			 * modulation (triple stream with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ rate_n_flags_from_tbl(mvm, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_MIMO3_SWITCH_GI)
+ tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_MIMO3_SWITCH_GI)
+ tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Check whether we should continue using the same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+{
+ struct iwl_scale_tbl_info *tbl;
+ int i;
+ int active_tbl;
+ int flush_interval_passed = 0;
+ struct iwl_mvm *mvm;
+
+ mvm = lq_sta->drv;
+ active_tbl = lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* If we've been disallowing search, see if we should now allow it */
+ if (lq_sta->stay_in_tbl) {
+ /* Elapsed time using current modulation mode */
+ if (lq_sta->flush_timer)
+ flush_interval_passed =
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
+ IWL_RATE_SCALE_FLUSH_INTVL));
+
+ /*
+ * Check if we should allow search for new modulation mode.
+ * If many frames have failed or succeeded, or we've used
+ * this same modulation for a long time, allow search, and
+ * reset history stats that keep track of whether we should
+ * allow a new search. Also (below) reset all bitmaps and
+ * stats in active history.
+ */
+ if (force_search ||
+ (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+ (lq_sta->total_success > lq_sta->max_success_limit) ||
+ ((!lq_sta->search_better_tbl) &&
+ (lq_sta->flush_timer) && (flush_interval_passed))) {
+ IWL_DEBUG_RATE(mvm,
+ "LQ: stay is expired %d %d %d\n",
+ lq_sta->total_failed,
+ lq_sta->total_success,
+ flush_interval_passed);
+
+ /* Allow search for new mode */
+ lq_sta->stay_in_tbl = 0; /* only place reset */
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = 0;
+ /*
+			 * Else if we've used this modulation mode for enough repetitions
+ * (regardless of elapsed time or success/failure), reset
+ * history bitmaps and rate-specific stats for all rates in
+ * active table.
+ */
+ } else {
+ lq_sta->table_count++;
+ if (lq_sta->table_count >=
+ lq_sta->table_count_limit) {
+ lq_sta->table_count = 0;
+
+ IWL_DEBUG_RATE(mvm,
+ "LQ: stay in table clear win\n");
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(
+ &(tbl->win[i]));
+ }
+ }
+
+ /* If transitioning to allow "search", reset all history
+ * bitmaps and stats in active table (this will become the new
+ * "search" table). */
+ if (!lq_sta->stay_in_tbl) {
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(&(tbl->win[i]));
+ }
+ }
+}
+
+/*
+ * Set up the rate table in the uCode
+ */
+static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ int index, u8 is_green)
+{
+ u32 rate;
+
+ /* Update uCode's rate table. */
+ rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+ rs_fill_link_cmd(mvm, lq_sta, rate);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void rs_rate_scale_perform(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int low = IWL_RATE_INVALID;
+ int high = IWL_RATE_INVALID;
+ int index;
+ int i;
+ struct iwl_rate_scale_data *window = NULL;
+ int current_tpt = IWL_INVALID_VALUE;
+ int low_tpt = IWL_INVALID_VALUE;
+ int high_tpt = IWL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ u16 rate_mask;
+ u8 update_lq = 0;
+ struct iwl_scale_tbl_info *tbl, *tbl1;
+ u16 rate_scale_index_msk = 0;
+ u8 is_green = 0;
+ u8 active_tbl = 0;
+ u8 done_search = 0;
+ u16 high_low;
+ s32 sr;
+ u8 tid = IWL_MAX_TID_COUNT;
+ struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data;
+
+ IWL_DEBUG_RATE(mvm, "rate scale calculate new rate for skb\n");
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ /* TODO: this could probably be improved.. */
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
+ lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+ tid = rs_tl_add_packet(lq_sta, hdr);
+ if ((tid != IWL_MAX_TID_COUNT) &&
+ (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &sta_priv->tid_data[tid];
+ if (tid_data->state == IWL_AGG_OFF)
+ lq_sta->is_agg = 0;
+ else
+ lq_sta->is_agg = 1;
+ } else {
+ lq_sta->is_agg = 0;
+ }
+
+ /*
+ * Select rate-scale / modulation-mode table to work with in
+ * the rest of this function: "search" if searching for better
+ * modulation mode, or "active" if doing rate scaling within a mode.
+ */
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+ if (is_legacy(tbl->lq_type))
+ lq_sta->is_green = 0;
+ else
+ lq_sta->is_green = rs_use_green(sta);
+ is_green = lq_sta->is_green;
+
+ /* current tx rate */
+ index = lq_sta->last_txrate_idx;
+
+ IWL_DEBUG_RATE(mvm, "Rate scale index %d for type %d\n", index,
+ tbl->lq_type);
+
+ /* rates available for this association, and for modulation mode */
+ rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+ IWL_DEBUG_RATE(mvm, "mask 0x%04X\n", rate_mask);
+
+ /* mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ /* supp_rates has no CCK bits in A mode */
+ rate_scale_index_msk = (u16) (rate_mask &
+ (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+ else
+ rate_scale_index_msk = (u16) (rate_mask &
+ lq_sta->supp_rates);
+
+ } else {
+ rate_scale_index_msk = rate_mask;
+ }
+
+ if (!rate_scale_index_msk)
+ rate_scale_index_msk = rate_mask;
+
+ if (!((1 << index) & rate_scale_index_msk)) {
+ IWL_ERR(mvm, "Current Rate is not valid\n");
+ if (lq_sta->search_better_tbl) {
+			/* revert to active table if search table is not valid */
+ tbl->lq_type = LQ_NONE;
+ lq_sta->search_better_tbl = 0;
+ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ /* get "active" rate info */
+ index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+ rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+ }
+ return;
+ }
+
+ /* Get expected throughput table and history window for current rate */
+ if (!tbl->expected_tpt) {
+ IWL_ERR(mvm, "tbl->expected_tpt is NULL\n");
+ return;
+ }
+
+ /* force user max rate if set by user */
+ if ((lq_sta->max_rate_idx != -1) &&
+ (lq_sta->max_rate_idx < index)) {
+ index = lq_sta->max_rate_idx;
+ update_lq = 1;
+ window = &(tbl->win[index]);
+ goto lq_update;
+ }
+
+ window = &(tbl->win[index]);
+
+ /*
+ * If there is not enough history to calculate actual average
+ * throughput, keep analyzing results of more tx frames, without
+ * changing rate or mode (bypass most of the rest of this function).
+ * Set up new rate table in uCode only if old rate is not supported
+ * in current association (use new rate found above).
+ */
+ fail_count = window->counter - window->success_counter;
+ if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
+ (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
+ IWL_DEBUG_RATE(mvm,
+ "LQ: still below TH. succ=%d total=%d for index %d\n",
+ window->success_counter, window->counter, index);
+
+ /* Can't calculate this yet; not enough history */
+ window->average_tpt = IWL_INVALID_VALUE;
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ rs_stay_in_table(lq_sta, false);
+
+ goto out;
+ }
+ /* Else we have enough samples; calculate estimate of
+ * actual average throughput */
+ if (window->average_tpt != ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128)) {
+ IWL_ERR(mvm,
+ "expected_tpt should have been calculated by now\n");
+ window->average_tpt = ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128);
+ }
+
+ /* If we are searching for better modulation mode, check success. */
+ if (lq_sta->search_better_tbl) {
+ /* If good success, continue using the "search" mode;
+ * no need to send new link quality command, since we're
+ * continuing to use the setup that we've been trying. */
+ if (window->average_tpt > lq_sta->last_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "LQ: SWITCHING TO NEW TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+ window->success_ratio,
+ window->average_tpt,
+ lq_sta->last_tpt);
+
+ if (!is_legacy(tbl->lq_type))
+ lq_sta->enable_counter = 1;
+
+ /* Swap tables; "search" becomes "active" */
+ lq_sta->active_tbl = active_tbl;
+ current_tpt = window->average_tpt;
+ /* Else poor success; go back to mode in "active" table */
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "LQ: GOING BACK TO THE OLD TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+ window->success_ratio,
+ window->average_tpt,
+ lq_sta->last_tpt);
+
+ /* Nullify "search" table */
+ tbl->lq_type = LQ_NONE;
+
+ /* Revert to "active" table */
+ active_tbl = lq_sta->active_tbl;
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* Revert to "active" rate and throughput info */
+ index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+ current_tpt = lq_sta->last_tpt;
+
+ /* Need to set up a new rate table in uCode */
+ update_lq = 1;
+ }
+
+ /* Either way, we've made a decision; modulation mode
+ * search is done, allow rate adjustment next time. */
+ lq_sta->search_better_tbl = 0;
+ done_search = 1; /* Don't switch modes below! */
+ goto lq_update;
+ }
+
+ /* (Else) not in search of better modulation mode, try for better
+ * starting rate, while staying in this mode. */
+ high_low = rs_get_adjacent_rate(mvm, index, rate_scale_index_msk,
+ tbl->lq_type);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+	/* If the user set a max rate, don't allow rates above that constraint */
+ if ((lq_sta->max_rate_idx != -1) &&
+ (lq_sta->max_rate_idx < high))
+ high = IWL_RATE_INVALID;
+
+ sr = window->success_ratio;
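+	/*
+	 * success_ratio (and the IWL_RATE_*_TH thresholds it is compared
+	 * against below) is in units of 1/128 of a percent, as computed in
+	 * rs_collect_tx_data(); e.g. 80% shows up here as 10240.
+	 */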
+
+ /* Collect measured throughputs for current and adjacent rates */
+ current_tpt = window->average_tpt;
+ if (low != IWL_RATE_INVALID)
+ low_tpt = tbl->win[low].average_tpt;
+ if (high != IWL_RATE_INVALID)
+ high_tpt = tbl->win[high].average_tpt;
+
+ scale_action = 0;
+
+ /* Too many failures, decrease rate */
+ if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
+ IWL_DEBUG_RATE(mvm,
+ "decrease rate because of low success_ratio\n");
+ scale_action = -1;
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if ((low_tpt == IWL_INVALID_VALUE) &&
+ (high_tpt == IWL_INVALID_VALUE)) {
+ if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != IWL_RATE_INVALID)
+ scale_action = 0;
+ }
+
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it! */
+ else if ((low_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt) &&
+ (high_tpt < current_tpt))
+ scale_action = 0;
+
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance. */
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IWL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt &&
+ sr >= IWL_RATE_INCREASE_TH) {
+ scale_action = 1;
+ } else {
+ scale_action = 0;
+ }
+
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IWL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (sr >= IWL_RATE_INCREASE_TH) {
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
+ ((sr > IWL_RATE_HIGH_TH) ||
+ (current_tpt > (100 * tbl->expected_tpt[low]))))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+ /* Decrease starting rate, update uCode's rate table */
+ if (low != IWL_RATE_INVALID) {
+ update_lq = 1;
+ index = low;
+ }
+
+ break;
+ case 1:
+ /* Increase starting rate, update uCode's rate table */
+ if (high != IWL_RATE_INVALID) {
+ update_lq = 1;
+ index = high;
+ }
+
+ break;
+ case 0:
+ /* No change */
+ default:
+ break;
+ }
+
+ IWL_DEBUG_RATE(mvm,
+ "choose rate scale index %d action %d low %d high %d type %d\n",
+ index, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+ /* Replace uCode's rate table for the destination station. */
+ if (update_lq)
+ rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+
+ rs_stay_in_table(lq_sta, false);
+
+ /*
+ * Search for new modulation mode if we're:
+ * 1) Not changing rates right now
+ * 2) Not just finishing up a search
+ * 3) Allowing a new search
+ */
+ if (!update_lq && !done_search &&
+ !lq_sta->stay_in_tbl && window->counter) {
+ /* Save current throughput to compare with "search" throughput*/
+ lq_sta->last_tpt = current_tpt;
+
+ /* Select a new "search" modulation mode to try.
+ * If one is found, set up the new "search" table. */
+ if (is_legacy(tbl->lq_type))
+ rs_move_legacy_other(mvm, lq_sta, sta, index);
+ else if (is_siso(tbl->lq_type))
+ rs_move_siso_to_other(mvm, lq_sta, sta, index);
+ else if (is_mimo2(tbl->lq_type))
+ rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
+ else
+ rs_move_mimo3_to_other(mvm, lq_sta, sta, index);
+
+ /* If new "search" mode was selected, set up in uCode table */
+ if (lq_sta->search_better_tbl) {
+ /* Access the "search" table, clear its history. */
+ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(&(tbl->win[i]));
+
+ /* Use new "search" start rate */
+ index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+
+ IWL_DEBUG_RATE(mvm,
+ "Switch current mcs: %X index: %d\n",
+ tbl->current_rate, index);
+ rs_fill_link_cmd(mvm, lq_sta, tbl->current_rate);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+ } else {
+ done_search = 1;
+ }
+ }
+
+ if (done_search && !lq_sta->stay_in_tbl) {
+ /* If the "active" (non-search) mode was legacy,
+ * and we've tried switching antennas,
+ * but we haven't been able to try HT modes (not available),
+ * stay with best antenna legacy modulation for a while
+ * before next round of mode comparisons. */
+ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ if (is_legacy(tbl1->lq_type) && !sta->ht_cap.ht_supported &&
+ lq_sta->action_counter > tbl1->max_search) {
+ IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
+ rs_set_stay_in_table(mvm, 1, lq_sta);
+ }
+
+ /* If we're in an HT mode, and all 3 mode switch actions
+ * have been tried and compared, stay in this best modulation
+ * mode for a while before next round of mode comparisons. */
+ if (lq_sta->enable_counter &&
+ (lq_sta->action_counter >= tbl1->max_search)) {
+ if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
+ (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+ (tid != IWL_MAX_TID_COUNT)) {
+ tid_data = &sta_priv->tid_data[tid];
+ if (tid_data->state == IWL_AGG_OFF) {
+ IWL_DEBUG_RATE(mvm,
+ "try to aggregate tid %d\n",
+ tid);
+ rs_tl_turn_on_agg(mvm, tid,
+ lq_sta, sta);
+ }
+ }
+ rs_set_stay_in_table(mvm, 0, lq_sta);
+ }
+ }
+
+out:
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+ lq_sta->last_txrate_idx = index;
+}
+
+/**
+ * rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
+static void rs_initialize_lq(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ enum ieee80211_band band)
+{
+ struct iwl_scale_tbl_info *tbl;
+ int rate_idx;
+ int i;
+ u32 rate;
+ u8 use_green = rs_use_green(sta);
+ u8 active_tbl = 0;
+ u8 valid_tx_ant;
+
+ if (!sta || !lq_sta)
+ return;
+
+ i = lq_sta->last_txrate_idx;
+
+ valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ if ((i < 0) || (i >= IWL_RATE_COUNT))
+ i = 0;
+
+ rate = iwl_rates[i].plcp;
+ tbl->ant_type = first_antenna(valid_tx_ant);
+ rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+ if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
+ rate |= RATE_MCS_CCK_MSK;
+
+ rs_get_tbl_info_from_mcs(rate, band, tbl, &rate_idx);
+ if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+ rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+ rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx, use_green);
+ tbl->current_rate = rate;
+ rs_set_expected_tpt_table(lq_sta, tbl);
+ rs_fill_link_cmd(NULL, lq_sta, rate);
+ /* TODO restore station should remember the lq cmd */
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
+}
+
+static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct iwl_op_mode *op_mode __maybe_unused =
+ (struct iwl_op_mode *)mvm_r;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_lq_sta *lq_sta = mvm_sta;
+ int rate_idx;
+
+ IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
+
+ /* Get max rate if user set max rate */
+ if (lq_sta) {
+ lq_sta->max_rate_idx = txrc->max_rate_idx;
+ if ((sband->band == IEEE80211_BAND_5GHZ) &&
+ (lq_sta->max_rate_idx != -1))
+ lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
+ if ((lq_sta->max_rate_idx < 0) ||
+ (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
+ lq_sta->max_rate_idx = -1;
+ }
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (lq_sta && !lq_sta->drv) {
+ IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+ mvm_sta = NULL;
+ }
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ if (rate_control_send_low(sta, mvm_sta, txrc))
+ return;
+
+ rate_idx = lq_sta->last_txrate_idx;
+
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+ rate_idx -= IWL_FIRST_OFDM_RATE;
+ /* 6M and 9M shared same MCS index */
+ rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+ if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ IWL_RATE_MIMO3_6M_PLCP)
+ rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
+ else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ IWL_RATE_MIMO2_6M_PLCP)
+ rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
+ info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+ info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
+ info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (lq_sta->last_rate_n_flags & RATE_HT_MCS_GF_MSK)
+ info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ } else {
+ /* Check for invalid rates */
+ if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
+ ((sband->band == IEEE80211_BAND_5GHZ) &&
+ (rate_idx < IWL_FIRST_OFDM_RATE)))
+ rate_idx = rate_lowest_index(sband, sta);
+ /* On valid 5 GHz rate, adjust index */
+ else if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IWL_FIRST_OFDM_RATE;
+ info->control.rates[0].flags = 0;
+ }
+ info->control.rates[0].idx = rate_idx;
+}
+
+static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+ gfp_t gfp)
+{
+ struct iwl_mvm_sta *sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
+ struct iwl_op_mode *op_mode __maybe_unused =
+ (struct iwl_op_mode *)mvm_rate;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+
+ IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+ return &sta_priv->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum ieee80211_band band)
+{
+ int i, j;
+ struct ieee80211_hw *hw = mvm->hw;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct iwl_mvm_sta *sta_priv;
+ struct iwl_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+ unsigned long supp; /* must be unsigned long for for_each_set_bit */
+
+ sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[band];
+
+ lq_sta->lq.sta_id = sta_priv->sta_id;
+
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+
+ lq_sta->flush_timer = 0;
+ lq_sta->supp_rates = sta->supp_rates[sband->band];
+
+ IWL_DEBUG_RATE(mvm,
+ "LQ: *** rate scale station global init for station %d ***\n",
+ sta_priv->sta_id);
+ /* TODO: what is a good starting rate for STA? About middle? Maybe not
+ * the lowest or the highest rate.. Could consider using RSSI from
+ * previous packets? Need to have IEEE 802.1X auth succeed immediately
+ * after assoc.. */
+
+ lq_sta->max_rate_idx = -1;
+ lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
+ lq_sta->is_green = rs_use_green(sta);
+ lq_sta->band = sband->band;
+ /*
+ * active legacy rates as per supported rates bitmap
+ */
+ supp = sta->supp_rates[sband->band];
+ lq_sta->active_legacy_rate = 0;
+ for_each_set_bit(i, &supp, BITS_PER_LONG)
+ lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
+
+ /*
+ * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+ * supp_rates[] does not; shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16)0x2);
+ lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16)0x2);
+ lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+ lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
+ lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
+ lq_sta->active_mimo3_rate &= ~((u16)0x2);
+ lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
+
+ IWL_DEBUG_RATE(mvm,
+ "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
+ lq_sta->active_siso_rate,
+ lq_sta->active_mimo2_rate,
+ lq_sta->active_mimo3_rate);
+
+ /* These values will be overridden later */
+ lq_sta->lq.single_stream_ant_msk =
+ first_antenna(mvm->nvm_data->valid_tx_ant);
+ lq_sta->lq.dual_stream_ant_msk =
+ mvm->nvm_data->valid_tx_ant &
+ ~first_antenna(mvm->nvm_data->valid_tx_ant);
+ if (!lq_sta->lq.dual_stream_ant_msk) {
+ lq_sta->lq.dual_stream_ant_msk = ANT_AB;
+ } else if (num_of_ant(mvm->nvm_data->valid_tx_ant) == 2) {
+ lq_sta->lq.dual_stream_ant_msk =
+ mvm->nvm_data->valid_tx_ant;
+ }
+
+ /* as default allow aggregation for all tids */
+ lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
+ lq_sta->drv = mvm;
+
+ /* Set last_txrate_idx to lowest rate */
+ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+ lq_sta->is_agg = 0;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->dbg_fixed_rate = 0;
+#endif
+
+ rs_initialize_lq(mvm, sta, lq_sta, band);
+}
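
The rx_mask manipulation above converts mac80211's per-stream HT MCS bitmap into the driver's internal rate bitmap, which reserves a bit for the 9M legacy rate that HT does not use. A standalone sketch of the conversion, with the IWL_FIRST_OFDM_RATE shift treated as an assumed constant:

#include <stdio.h>
#include <stdint.h>

#define ASSUMED_FIRST_OFDM_RATE	4	/* stand-in for IWL_FIRST_OFDM_RATE */

/* Mirrors the active_siso_rate construction: shift the MCS bitmap up by
 * one to open a hole for the 9M slot, keep MCS 0 in bit 0, force the 9M
 * bit off, then move everything above the four CCK rates. */
static uint16_t mcs_mask_to_rate_mask(uint8_t rx_mask)
{
	uint16_t rate = (uint16_t)rx_mask << 1;

	rate |= rx_mask & 0x1;
	rate &= ~(uint16_t)0x2;
	return rate << ASSUMED_FIRST_OFDM_RATE;
}

int main(void)
{
	/* MCS 0-7 supported (0xff) -> 0x1fd0: bit 4 (6M) plus bits 6..12 */
	printf("0x%04x\n", mcs_mask_to_rate_mask(0xff));
	return 0;
}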
+
+static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta, u32 new_rate)
+{
+ struct iwl_scale_tbl_info tbl_type;
+ int index = 0;
+ int rate_idx;
+ int repeat_rate = 0;
+ u8 ant_toggle_cnt = 0;
+ u8 use_ht_possible = 1;
+ u8 valid_tx_ant = 0;
+ struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+
+ /* Override starting rate (index 0) if needed for debug purposes */
+ rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Interpret new_rate (rate_n_flags) */
+ rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+ &tbl_type, &rate_idx);
+
+ /* How many times should we repeat the initial rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ ant_toggle_cnt = 1;
+ repeat_rate = IWL_NUMBER_TRY;
+ } else {
+ repeat_rate = min(IWL_HT_NUMBER_TRY,
+ LINK_QUAL_AGG_DISABLE_START_DEF - 1);
+ }
+
+ lq_cmd->mimo_delim = is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+ /* Fill 1st table entry (index 0) */
+ lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
+
+ if (num_of_ant(tbl_type.ant_type) == 1)
+ lq_cmd->single_stream_ant_msk = tbl_type.ant_type;
+ else if (num_of_ant(tbl_type.ant_type) == 2)
+ lq_cmd->dual_stream_ant_msk = tbl_type.ant_type;
+ /* otherwise we don't modify the existing value */
+
+ index++;
+ repeat_rate--;
+ if (mvm)
+ valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+
+ /* Fill rest of rate table */
+ while (index < LINK_QUAL_MAX_RETRY_NUM) {
+ /* Repeat initial/next rate.
+ * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
+ * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
+ while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (mvm &&
+ rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+ }
+
+ /* Override next rate if needed for debug purposes */
+ rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[index] =
+ cpu_to_le32(new_rate);
+ repeat_rate--;
+ index++;
+ }
+
+ rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+ &rate_idx);
+
+
+ /* Indicate to uCode which entries might be MIMO.
+ * If initial rate was MIMO, this will finally end up
+ * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+ if (is_mimo(tbl_type.lq_type))
+ lq_cmd->mimo_delim = index;
+
+ /* Get next rate */
+ new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+ use_ht_possible);
+
+ /* How many times should we repeat the next rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (mvm &&
+ rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+
+ repeat_rate = IWL_NUMBER_TRY;
+ } else {
+ repeat_rate = IWL_HT_NUMBER_TRY;
+ }
+
+ /* Don't allow HT rates after next pass.
+ * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ use_ht_possible = 0;
+
+ /* Override next rate if needed for debug purposes */
+ rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
+
+ index++;
+ repeat_rate--;
+ }
+
+ lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+ lq_cmd->agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
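
rs_fill_link_cmd() builds the 16-entry retry table that the firmware walks on consecutive transmission failures: the starting rate is repeated a few times, then the code steps down to a lower rate and repeats again. A deliberately simplified model of that fill loop (no antenna toggling and no HT-to-legacy transition), with the repeat count treated as an assumed constant:

#include <stdio.h>

#define RETRY_TABLE_SIZE	16
#define ASSUMED_HT_TRIES	3	/* stand-in for IWL_HT_NUMBER_TRY */

int main(void)
{
	int table[RETRY_TABLE_SIZE];
	int rate = 12;			/* abstract starting rate index */
	int repeat = ASSUMED_HT_TRIES;
	int i = 0;

	/* Simplified fill loop: repeat the current rate, then fall back
	 * one step and repeat again until the table is full. */
	while (i < RETRY_TABLE_SIZE) {
		table[i++] = rate;
		if (--repeat == 0 && rate > 0) {
			rate--;
			repeat = ASSUMED_HT_TRIES;
		}
	}

	for (i = 0; i < RETRY_TABLE_SIZE; i++)
		printf("%d ", table[i]);
	printf("\n");
	return 0;
}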
+
+static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* rate scaling requires a free function to be implemented */
+static void rs_free(void *mvm_rate)
+{
+ return;
+}
+
+static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
+ void *mvm_sta)
+{
+ struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+
+ IWL_DEBUG_RATE(mvm, "enter\n");
+ IWL_DEBUG_RATE(mvm, "leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index)
+{
+ struct iwl_mvm *mvm;
+ u8 valid_tx_ant;
+ u8 ant_sel_tx;
+
+ mvm = lq_sta->drv;
+ valid_tx_ant = mvm->nvm_data->valid_tx_ant;
+ if (lq_sta->dbg_fixed_rate) {
+ ant_sel_tx =
+ ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
+ >> RATE_MCS_ANT_POS);
+ if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+ *rate_n_flags = lq_sta->dbg_fixed_rate;
+ IWL_DEBUG_RATE(mvm, "Fixed rate ON\n");
+ } else {
+ lq_sta->dbg_fixed_rate = 0;
+ IWL_ERR(mvm,
+ "Invalid antenna selection 0x%X, Valid is 0x%X\n",
+ ant_sel_tx, valid_tx_ant);
+ IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
+ }
+ } else {
+ IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
+ }
+}
+
+static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_mvm *mvm;
+ char buf[64];
+ size_t buf_size;
+ u32 parsed_rate;
+
+
+ mvm = lq_sta->drv;
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x", &parsed_rate) == 1)
+ lq_sta->dbg_fixed_rate = parsed_rate;
+ else
+ lq_sta->dbg_fixed_rate = 0;
+
+ rs_program_fix_rate(mvm, lq_sta);
+
+ return count;
+}
+
+static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i = 0;
+ int index = 0;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_mvm *mvm;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+ mvm = lq_sta->drv;
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
+	desc += sprintf(buff+desc, "failed=%d success=%d rate=0x%X\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc += sprintf(buff+desc, "fixed rate 0x%X\n",
+ lq_sta->dbg_fixed_rate);
+ desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+ (mvm->nvm_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (mvm->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (mvm->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ desc += sprintf(buff+desc, "lq type %s\n",
+ (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+ if (is_Ht(tbl->lq_type)) {
+ desc += sprintf(buff+desc, " %s",
+ (is_siso(tbl->lq_type)) ? "SISO" :
+ ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
+ desc += sprintf(buff+desc, " %s",
+ (tbl->is_ht40) ? "40MHz" : "20MHz");
+ desc += sprintf(buff+desc, " %s %s %s\n",
+ (tbl->is_SGI) ? "SGI" : "",
+ (lq_sta->is_green) ? "GF enabled" : "",
+ (lq_sta->is_agg) ? "AGG on" : "");
+ }
+ desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+ lq_sta->last_rate_n_flags);
+ desc += sprintf(buff+desc,
+			"general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
+ lq_sta->lq.flags,
+ lq_sta->lq.mimo_delim,
+ lq_sta->lq.single_stream_ant_msk,
+ lq_sta->lq.dual_stream_ant_msk);
+
+ desc += sprintf(buff+desc,
+ "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+ le16_to_cpu(lq_sta->lq.agg_time_limit),
+ lq_sta->lq.agg_disable_start_th,
+ lq_sta->lq.agg_frame_cnt_limit);
+
+ desc += sprintf(buff+desc,
+ "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+ lq_sta->lq.initial_rate_index[0],
+ lq_sta->lq.initial_rate_index[1],
+ lq_sta->lq.initial_rate_index[2],
+ lq_sta->lq.initial_rate_index[3]);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ index = iwl_hwrate_to_plcp_idx(
+ le32_to_cpu(lq_sta->lq.rs_table[i]));
+ if (is_legacy(tbl->lq_type)) {
+ desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
+ i, le32_to_cpu(lq_sta->lq.rs_table[i]),
+ iwl_rate_mcs[index].mbps);
+ } else {
+ desc += sprintf(buff+desc,
+ " rate[%d] 0x%X %smbps (%s)\n",
+ i, le32_to_cpu(lq_sta->lq.rs_table[i]),
+ iwl_rate_mcs[index].mbps,
+ iwl_rate_mcs[index].mcs);
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+ .write = rs_sta_dbgfs_scale_table_write,
+ .read = rs_sta_dbgfs_scale_table_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i, j;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ for (i = 0; i < LQ_SIZE; i++) {
+ desc += sprintf(buff+desc,
+ "%s type=%d SGI=%d HT40=%d DUP=0 GF=%d\n"
+ "rate=0x%X\n",
+ lq_sta->active_tbl == i ? "*" : "x",
+ lq_sta->lq_info[i].lq_type,
+ lq_sta->lq_info[i].is_SGI,
+ lq_sta->lq_info[i].is_ht40,
+ lq_sta->is_green,
+ lq_sta->lq_info[i].current_rate);
+ for (j = 0; j < IWL_RATE_COUNT; j++) {
+ desc += sprintf(buff+desc,
+ "counter=%d success=%d %%=%d\n",
+ lq_sta->lq_info[i].win[j].counter,
+ lq_sta->lq_info[i].win[j].success_counter,
+ lq_sta->lq_info[i].win[j].success_ratio);
+ }
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = rs_sta_dbgfs_stats_table_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+ char buff[120];
+ int desc = 0;
+
+ if (is_Ht(tbl->lq_type))
+ desc += sprintf(buff+desc,
+ "Bit Rate= %d Mb/s\n",
+ tbl->expected_tpt[lq_sta->last_txrate_idx]);
+ else
+ desc += sprintf(buff+desc,
+ "Bit Rate= %d Mb/s\n",
+ iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+ .read = rs_sta_dbgfs_rate_scale_data_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
+{
+ struct iwl_lq_sta *lq_sta = mvm_sta;
+ lq_sta->rs_sta_dbgfs_scale_table_file =
+ debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_scale_table_ops);
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", S_IRUSR, dir,
+ lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+ debugfs_create_file("rate_scale_data", S_IRUSR, dir,
+ lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
+ lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+ debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->tx_agg_tid_en);
+}
+
+static void rs_remove_debugfs(void *mvm, void *mvm_sta)
+{
+ struct iwl_lq_sta *lq_sta = mvm_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by the driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added, we ignore it.
+ */
+static void rs_rate_init_stub(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *mvm_sta)
+{
+}
+static struct rate_control_ops rs_mvm_ops = {
+ .module = NULL,
+ .name = RS_NAME,
+ .tx_status = rs_tx_status,
+ .get_rate = rs_get_rate,
+ .rate_init = rs_rate_init_stub,
+ .alloc = rs_alloc,
+ .free = rs_free,
+ .alloc_sta = rs_alloc_sta,
+ .free_sta = rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = rs_add_debugfs,
+ .remove_sta_debugfs = rs_remove_debugfs,
+#endif
+};
+
+int iwl_mvm_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_mvm_ops);
+}
+
+void iwl_mvm_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_mvm_ops);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
new file mode 100644
index 00000000000..219c6857cc0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -0,0 +1,393 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __rs_h__
+#define __rs_h__
+
+#include <net/mac80211.h>
+
+#include "iwl-config.h"
+
+#include "fw-api.h"
+#include "iwl-trans.h"
+
+struct iwl_rs_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+};
+
+#define IWL_RATE_60M_PLCP 3
+
+enum {
+ IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+ IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+#define LINK_QUAL_MAX_RETRY_NUM 16
+
+enum {
+ IWL_RATE_6M_INDEX_TABLE = 0,
+ IWL_RATE_9M_INDEX_TABLE,
+ IWL_RATE_12M_INDEX_TABLE,
+ IWL_RATE_18M_INDEX_TABLE,
+ IWL_RATE_24M_INDEX_TABLE,
+ IWL_RATE_36M_INDEX_TABLE,
+ IWL_RATE_48M_INDEX_TABLE,
+ IWL_RATE_54M_INDEX_TABLE,
+ IWL_RATE_1M_INDEX_TABLE,
+ IWL_RATE_2M_INDEX_TABLE,
+ IWL_RATE_5M_INDEX_TABLE,
+ IWL_RATE_11M_INDEX_TABLE,
+ IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
+#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
+#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
+#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
+#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
+#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
+#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
+#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
+#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
+#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
+#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
+#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
+#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
+
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+ IWL_RATE_SISO_6M_PLCP = 0,
+ IWL_RATE_SISO_12M_PLCP = 1,
+ IWL_RATE_SISO_18M_PLCP = 2,
+ IWL_RATE_SISO_24M_PLCP = 3,
+ IWL_RATE_SISO_36M_PLCP = 4,
+ IWL_RATE_SISO_48M_PLCP = 5,
+ IWL_RATE_SISO_54M_PLCP = 6,
+ IWL_RATE_SISO_60M_PLCP = 7,
+ IWL_RATE_MIMO2_6M_PLCP = 0x8,
+ IWL_RATE_MIMO2_12M_PLCP = 0x9,
+ IWL_RATE_MIMO2_18M_PLCP = 0xa,
+ IWL_RATE_MIMO2_24M_PLCP = 0xb,
+ IWL_RATE_MIMO2_36M_PLCP = 0xc,
+ IWL_RATE_MIMO2_48M_PLCP = 0xd,
+ IWL_RATE_MIMO2_54M_PLCP = 0xe,
+ IWL_RATE_MIMO2_60M_PLCP = 0xf,
+ IWL_RATE_MIMO3_6M_PLCP = 0x10,
+ IWL_RATE_MIMO3_12M_PLCP = 0x11,
+ IWL_RATE_MIMO3_18M_PLCP = 0x12,
+ IWL_RATE_MIMO3_24M_PLCP = 0x13,
+ IWL_RATE_MIMO3_36M_PLCP = 0x14,
+ IWL_RATE_MIMO3_48M_PLCP = 0x15,
+ IWL_RATE_MIMO3_54M_PLCP = 0x16,
+ IWL_RATE_MIMO3_60M_PLCP = 0x17,
+ IWL_RATE_SISO_INVM_PLCP,
+ IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+ IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+ IWL_RATE_6M_IEEE = 12,
+ IWL_RATE_9M_IEEE = 18,
+ IWL_RATE_12M_IEEE = 24,
+ IWL_RATE_18M_IEEE = 36,
+ IWL_RATE_24M_IEEE = 48,
+ IWL_RATE_36M_IEEE = 72,
+ IWL_RATE_48M_IEEE = 96,
+ IWL_RATE_54M_IEEE = 108,
+ IWL_RATE_60M_IEEE = 120,
+ IWL_RATE_1M_IEEE = 2,
+ IWL_RATE_2M_IEEE = 4,
+ IWL_RATE_5M_IEEE = 11,
+ IWL_RATE_11M_IEEE = 22,
+};
+
+#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+
+#define IWL_INVALID_VALUE -1
+
+#define IWL_MIN_RSSI_VAL -100
+#define IWL_MAX_RSSI_VAL 0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IWL_LEGACY_FAILURE_LIMIT 160
+#define IWL_LEGACY_SUCCESS_LIMIT 480
+#define IWL_LEGACY_TABLE_COUNT 160
+
+#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
+#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
+#define IWL_NONE_LEGACY_TABLE_COUNT 1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IWL_RS_GOOD_RATIO 12800 /* 100% */
+#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
+#define IWL_RATE_HIGH_TH 10880 /* 85% */
+#define IWL_RATE_INCREASE_TH 6400 /* 50% */
+#define IWL_RATE_DECREASE_TH 1920 /* 15% */
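
These thresholds use a fixed-point success ratio: ACKed over attempted frames scaled by 128, so 12800 means 100% and no floating point is needed. A small worked example of the scaling (the driver computes the same kind of ratio from its per-rate window counters):

#include <stdio.h>

/* Fixed-point success ratio used by the thresholds above:
 * 128 * percent, i.e. 12800 == 100%. */
static int success_ratio(int acked, int attempted)
{
	return attempted ? (128 * 100 * acked) / attempted : 0;
}

int main(void)
{
	/* 85 ACKed frames out of 100 attempts -> 10880 (IWL_RATE_HIGH_TH) */
	printf("%d\n", success_ratio(85, 100));
	return 0;
}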
+
+/* possible actions when in legacy mode */
+#define IWL_LEGACY_SWITCH_ANTENNA1 0
+#define IWL_LEGACY_SWITCH_ANTENNA2 1
+#define IWL_LEGACY_SWITCH_SISO 2
+#define IWL_LEGACY_SWITCH_MIMO2_AB 3
+#define IWL_LEGACY_SWITCH_MIMO2_AC 4
+#define IWL_LEGACY_SWITCH_MIMO2_BC 5
+#define IWL_LEGACY_SWITCH_MIMO3_ABC 6
+
+/* possible actions when in siso mode */
+#define IWL_SISO_SWITCH_ANTENNA1 0
+#define IWL_SISO_SWITCH_ANTENNA2 1
+#define IWL_SISO_SWITCH_MIMO2_AB 2
+#define IWL_SISO_SWITCH_MIMO2_AC 3
+#define IWL_SISO_SWITCH_MIMO2_BC 4
+#define IWL_SISO_SWITCH_GI 5
+#define IWL_SISO_SWITCH_MIMO3_ABC 6
+
+
+/* possible actions when in mimo mode */
+#define IWL_MIMO2_SWITCH_ANTENNA1 0
+#define IWL_MIMO2_SWITCH_ANTENNA2 1
+#define IWL_MIMO2_SWITCH_SISO_A 2
+#define IWL_MIMO2_SWITCH_SISO_B 3
+#define IWL_MIMO2_SWITCH_SISO_C 4
+#define IWL_MIMO2_SWITCH_GI 5
+#define IWL_MIMO2_SWITCH_MIMO3_ABC 6
+
+
+/* possible actions when in mimo3 mode */
+#define IWL_MIMO3_SWITCH_ANTENNA1 0
+#define IWL_MIMO3_SWITCH_ANTENNA2 1
+#define IWL_MIMO3_SWITCH_SISO_A 2
+#define IWL_MIMO3_SWITCH_SISO_B 3
+#define IWL_MIMO3_SWITCH_SISO_C 4
+#define IWL_MIMO3_SWITCH_MIMO2_AB 5
+#define IWL_MIMO3_SWITCH_MIMO2_AC 6
+#define IWL_MIMO3_SWITCH_MIMO2_BC 7
+#define IWL_MIMO3_SWITCH_GI 8
+
+
+#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
+
+/*FIXME:RS:add possible actions for MIMO3*/
+
+#define IWL_ACTION_LIMIT 3 /* # possible actions */
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
+
+#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IWL_AGG_TPT_THREHOLD 0
+#define IWL_AGG_LOAD_THRESHOLD 10
+#define IWL_AGG_ALL_TID 0xff
+#define TID_QUEUE_CELL_SPACING 50 /*mS */
+#define TID_QUEUE_MAX_SIZE 20
+#define TID_ROUND_VALUE 5 /* mS */
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
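
TIME_WRAP_AROUND() relies on unsigned modular arithmetic so that elapsed time stays correct even after the timestamp counter overflows. A quick standalone check of both the plain and the wrapped case:

#include <stdio.h>

#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))

int main(void)
{
	/* Plain case: from tick 100 to tick 250, 150 ticks elapsed. */
	printf("%lu\n", (unsigned long)TIME_WRAP_AROUND(100UL, 250UL));

	/* Wrapped case: the counter overflowed between the samples, yet
	 * modular arithmetic still yields the elapsed 51 ticks. */
	printf("%lu\n", (unsigned long)TIME_WRAP_AROUND(~0UL - 10, 40UL));
	return 0;
}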
+
+enum iwl_table_type {
+ LQ_NONE,
+ LQ_G, /* legacy types */
+ LQ_A,
+ LQ_SISO, /* high-throughput types */
+ LQ_MIMO2,
+ LQ_MIMO3,
+ LQ_MAX,
+};
+
+#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
+#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define IWL_MAX_MCS_DISPLAY_SIZE 12
+
+struct iwl_rate_mcs_info {
+ char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+ u64 data; /* bitmap of successful frames */
+ s32 success_counter; /* number of frames successful */
+ s32 success_ratio; /* per-cent * 128 */
+ s32 counter; /* number of frames attempted */
+ s32 average_tpt; /* success ratio * expected throughput */
+ unsigned long stamp;
+};
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+ enum iwl_table_type lq_type;
+ u8 ant_type;
+ u8 is_SGI; /* 1 = short guard interval */
+ u8 is_ht40; /* 1 = 40 MHz channel width */
+ u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+	u8 max_search;	/* maximum number of tables we can search */
+ s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ u32 current_rate; /* rate_n_flags, uCode API format */
+ struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+};
+
+struct iwl_traffic_load {
+ unsigned long time_stamp; /* age of the oldest statistics */
+ u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
+ * slice */
+ u32 total; /* total num of packets during the
+ * last TID_MAX_TIME_DIFF */
+	u8 queue_count;	        /* number of queues that have
+				 * been used since the last cleanup */
+ u8 head; /* start of the circular buffer */
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+ u8 active_tbl; /* index of active table, range 0-1 */
+ u8 enable_counter; /* indicates HT mode */
+ u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
+ u8 search_better_tbl; /* 1: currently trying alternate mode */
+ s32 last_tpt;
+
+ /* The following determine when to search for a new mode */
+ u32 table_count_limit;
+ u32 max_failure_limit; /* # failed frames before new search */
+ u32 max_success_limit; /* # successful frames before new search */
+ u32 table_count;
+ u32 total_failed; /* total failed frames, any/all rates */
+ u32 total_success; /* total successful frames, any/all rates */
+ u64 flush_timer; /* time staying in mode before new search */
+
+ u8 action_counter; /* # mode-switch actions tried */
+ u8 is_green;
+ enum ieee80211_band band;
+
+ /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+ u32 supp_rates;
+ u16 active_legacy_rate;
+ u16 active_siso_rate;
+ u16 active_mimo2_rate;
+ u16 active_mimo3_rate;
+ s8 max_rate_idx; /* Max rate set by user */
+ u8 missed_rate_counter;
+
+ struct iwl_lq_cmd lq;
+ struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+ struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
+ u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_scale_table_file;
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+ struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ u32 dbg_fixed_rate;
+#endif
+ struct iwl_mvm *drv;
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+ /* packets destined for this STA are aggregated */
+ u8 is_agg;
+ /* BT traffic this sta was last updated in */
+ u8 last_bt_traffic;
+};
+
+static inline u8 num_of_ant(u8 mask)
+{
+ return !!((mask) & ANT_A) +
+ !!((mask) & ANT_B) +
+ !!((mask) & ANT_C);
+}
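
num_of_ant() simply counts the antenna bits set in a mask. A tiny usage sketch; the ANT_A/ANT_B/ANT_C values below are assumptions standing in for the definitions in the shared iwlwifi headers:

#include <stdio.h>

/* Assumed antenna bits, standing in for the ANT_A/ANT_B/ANT_C
 * definitions in the shared iwlwifi headers. */
#define ANT_A	0x1
#define ANT_B	0x2
#define ANT_C	0x4

static unsigned char num_of_ant(unsigned char mask)
{
	return !!(mask & ANT_A) + !!(mask & ANT_B) + !!(mask & ANT_C);
}

int main(void)
{
	/* A|B -> 2 chains, A|B|C -> 3 chains */
	printf("%d %d\n", num_of_ant(ANT_A | ANT_B),
	       num_of_ant(ANT_A | ANT_B | ANT_C));
	return 0;
}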
+
+/* Initialize station's rate scaling information after adding station */
+extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum ieee80211_band band);
+
+/**
+ * iwl_mvm_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a standalone module. The driver can call
+ * iwl_mvm_rate_control_register() in order to register the rate control
+ * callbacks with the mac80211 subsystem. This should be performed prior to
+ * calling ieee80211_register_hw().
+ *
+ */
+extern int iwl_mvm_rate_control_register(void);
+
+/**
+ * iwl_mvm_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw(), but before
+ * the driver is unloaded.
+ */
+extern void iwl_mvm_rate_control_unregister(void);
+
+#endif /* __rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
new file mode 100644
index 00000000000..3f3ce91ad5c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -0,0 +1,355 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include "iwl-trans.h"
+
+#include "mvm.h"
+#include "fw-api.h"
+
+/*
+ * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler
+ *
+ * Copies the phy information into mvm->last_phy_info; it will be used when
+ * the actual data arrives from the fw in the next packet.
+ */
+int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
+ mvm->ampdu_ref++;
+ return 0;
+}
+
+/*
+ * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
+ *
+ * Adds the rxb to a new skb and gives it to mac80211
+ */
+static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct ieee80211_hdr *hdr, u16 len,
+ u32 ampdu_status,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ unsigned int hdrlen, fraglen;
+
+	/* Don't use dev_alloc_skb(); we'll have enough headroom once the
+	 * ieee80211_hdr is pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+ /* If frame is small enough to fit in skb->head, pull it completely.
+ * If not, only pull ieee80211_hdr so that splice() or TCP coalesce
+ * are more efficient.
+ */
+ hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);
+
+ memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
+ fraglen = len - hdrlen;
+
+ if (fraglen) {
+ int offset = (void *)hdr + hdrlen -
+ rxb_addr(rxb) + rxb_offset(rxb);
+
+ skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+ fraglen, rxb->truesize);
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx_ni(mvm->hw, skb);
+}
+
+/*
+ * iwl_mvm_calc_rssi - calculate the rssi in dBm
+ * @phy_info: the phy information for the coming packet
+ */
+static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
+ struct iwl_rx_phy_info *phy_info)
+{
+ u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db;
+ u32 val;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the Digital Signal Processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's Automatic Gain Control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info.
+ */
+ val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
+ rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
+ rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
+ val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]);
+ rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS;
+
+ val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
+ agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
+
+ max_rssi = max_t(u32, rssi_a, rssi_b);
+ max_rssi = max_t(u32, max_rssi, rssi_c);
+
+ IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ rssi_a, rssi_b, rssi_c, max_rssi, agc_db);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc_db - IWL_RSSI_OFFSET;
+}
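
The final line of iwl_mvm_calc_rssi() converts the DSP readings to dBm: the strongest chain minus the AGC gain minus a calibration constant. A worked example of that arithmetic; the offset of 44 below is only an illustrative stand-in for IWL_RSSI_OFFSET, which is defined elsewhere in the driver:

#include <stdio.h>

#define ASSUMED_RSSI_OFFSET	44	/* illustrative stand-in for IWL_RSSI_OFFSET */

/* Same arithmetic as the tail of iwl_mvm_calc_rssi(): strongest chain
 * minus the AGC gain minus a calibration constant gives dBm. */
static int rssi_dbm(unsigned int rssi_a, unsigned int rssi_b,
		    unsigned int rssi_c, unsigned int agc_db)
{
	unsigned int max_rssi = rssi_a;

	if (rssi_b > max_rssi)
		max_rssi = rssi_b;
	if (rssi_c > max_rssi)
		max_rssi = rssi_c;
	return (int)max_rssi - (int)agc_db - ASSUMED_RSSI_OFFSET;
}

int main(void)
{
	/* chains report 97/94/90 with 103 dB of AGC gain -> -50 dBm */
	printf("%d dBm\n", rssi_dbm(97, 94, 90, 103));
	return 0;
}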
+
+/*
+ * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format
+ * @mvm: the mvm object
+ * @hdr: 80211 header
+ * @stats: status in mac80211's format
+ * @rx_pkt_status: status coming from fw
+ *
+ * Returns a non-zero value if the packet should be dropped
+ */
+static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *stats,
+ u32 rx_pkt_status)
+{
+ if (!ieee80211_has_protected(hdr->frame_control) ||
+ (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_NO_ENC)
+ return 0;
+
+ /* packet was encrypted with unknown alg */
+ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_ENC_ERR)
+ return 0;
+
+ switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
+ case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
+ /* alg is CCM: check MIC only */
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n");
+ return 0;
+
+ case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
+ /* Don't drop the frame and decrypt it in SW */
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
+ return 0;
+ /* fall through if TTAK OK */
+
+ case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+
+ default:
+ IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+ }
+
+ return 0;
+}
+
+/*
+ * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
+ *
+ * Handles the actual data of the Rx packet from the fw
+ */
+int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_rx_status rx_status = {};
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_info *phy_info;
+ struct iwl_rx_mpdu_res_start *rx_res;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+ u32 rx_pkt_status;
+
+ phy_info = &mvm->last_phy_info;
+ rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
+ hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
+ len = le16_to_cpu(rx_res->byte_count);
+ rx_pkt_status = le32_to_cpup((__le32 *)
+ (pkt->data + sizeof(*rx_res) + len));
+
+ memset(&rx_status, 0, sizeof(rx_status));
+
+ /*
+ * drop the packet if it has failed being decrypted by HW
+ */
+ if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, &rx_status, rx_pkt_status)) {
+ IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
+ rx_pkt_status);
+ return 0;
+ }
+
+	if (unlikely(phy_info->cfg_phy_cnt > 20)) {
+ IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
+ phy_info->cfg_phy_cnt);
+ return 0;
+ }
+
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
+ !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
+ IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
+ return 0;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_info->timestamp);
+ rx_status.band =
+ (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
+ rx_status.band);
+ /*
+ * TSF as indicated by the fw is at INA time, but mac80211 expects the
+ * TSF at the beginning of the MPDU.
+ */
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info);
+
+ IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
+ (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap,
+ * it wants an actual antenna number but I always get "7"
+ * for most legacy frames I receive indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna = (le16_to_cpu(phy_info->phy_flags) &
+ RX_RES_PHY_FLAGS_ANTENNA)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+ /*
+ * We know which subframes of an A-MPDU belong
+ * together since we get a single PHY response
+ * from the firmware for all of them
+ */
+ rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status.ampdu_reference = mvm->ampdu_ref;
+ }
+
+ /* Set up the HT phy flags */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status.flag |= RX_FLAG_40MHZ;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status.flag |= RX_FLAG_80MHZ;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status.flag |= RX_FLAG_160MHZ;
+ break;
+ }
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+ if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ rx_status.flag |= RX_FLAG_HT_GF;
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ rx_status.flag |= RX_FLAG_HT;
+ rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ rx_status.vht_nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status.flag |= RX_FLAG_VHT;
+ } else {
+ rx_status.rate_idx =
+ iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ rx_status.band);
+ }
+
+ iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status,
+ rxb, &rx_status);
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
new file mode 100644
index 00000000000..406c53ad0a4
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -0,0 +1,437 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "mvm.h"
+#include "iwl-eeprom-parse.h"
+#include "fw-api-scan.h"
+
+#define IWL_PLCP_QUIET_THRESH 1
+#define IWL_ACTIVE_QUIET_TIME 10
+
+static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
+{
+ u16 rx_chain;
+ u8 rx_ant = mvm->nvm_data->valid_rx_ant;
+
+ rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
+ return cpu_to_le16(rx_chain);
+}
+
+static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
+{
+ if (vif->bss_conf.assoc)
+ return cpu_to_le32(200 * 1024);
+ else
+ return 0;
+}
+
+static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
+{
+ if (vif->bss_conf.assoc)
+ return cpu_to_le32(vif->bss_conf.beacon_int);
+ else
+ return 0;
+}
+
+static inline __le32
+iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req)
+{
+ if (req->channels[0]->band == IEEE80211_BAND_2GHZ)
+ return cpu_to_le32(PHY_BAND_24);
+ else
+ return cpu_to_le32(PHY_BAND_5);
+}
+
+static inline __le32
+iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
+ bool no_cck)
+{
+ u32 tx_ant;
+
+ mvm->scan_last_antenna_idx =
+ iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant,
+ mvm->scan_last_antenna_idx);
+ tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+ if (band == IEEE80211_BAND_2GHZ && !no_cck)
+ return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
+ tx_ant);
+ else
+ return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
+}
+
+/*
+ * We insert the SSIDs in an inverted order, because the FW will
+ * invert it back. The most prioritized SSID, which is first in the
+ * request list, is not copied here, but inserted directly to the probe
+ * request.
+ */
+static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
+ struct cfg80211_scan_request *req)
+{
+ int fw_idx, req_idx;
+
+ fw_idx = 0;
+	for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--, fw_idx++) {
+ cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
+ cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
+ memcpy(cmd->direct_scan[fw_idx].ssid,
+ req->ssids[req_idx].ssid,
+ req->ssids[req_idx].ssid_len);
+ }
+}
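
To see the inverted ordering in practice: for a request with three SSIDs, entries 1..2 land in direct_scan[] in reverse, while entry 0 is left for the probe request template. A standalone sketch mirroring the loop above:

#include <stdio.h>

int main(void)
{
	const char *ssids[] = { "most-prioritized", "second", "third" };
	int n_ssids = 3;
	int fw_idx = 0, req_idx;

	/* Same iteration order as iwl_mvm_scan_fill_ssids(): entries
	 * 1..n-1 are copied in reverse; entry 0 is left for the probe
	 * request template instead. */
	for (req_idx = n_ssids - 1; req_idx > 0; req_idx--, fw_idx++)
		printf("direct_scan[%d] = %s\n", fw_idx, ssids[req_idx]);
	return 0;
}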
+
+/*
+ * If req->n_ssids > 0, it means we should do an active scan.
+ * In the case of an active scan without a directed scan, we receive a
+ * zero-length SSID just to notify that this scan is active and not passive.
+ * In order to notify the FW of the number of SSIDs we wish to scan (including
+ * the zero-length one), we need to set the corresponding bits in chan->type,
+ * one for each SSID, and set the active bit (first).
+ */
+static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+{
+ if (band == IEEE80211_BAND_2GHZ)
+ return 30 + 3 * (n_ssids + 1);
+ return 20 + 2 * (n_ssids + 1);
+}
+
+static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+{
+ return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
+}
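
The dwell helpers scale the active dwell with the number of SSIDs being probed, while the passive dwell is fixed per band. A worked example of the resulting per-channel dwell values:

#include <stdio.h>

/* Mirrors the dwell helpers above (per-channel dwell values). */
static unsigned short active_dwell_2ghz(int n_ssids)
{
	return 30 + 3 * (n_ssids + 1);
}

static unsigned short active_dwell_5ghz(int n_ssids)
{
	return 20 + 2 * (n_ssids + 1);
}

int main(void)
{
	/* Two SSIDs: 39 on 2.4 GHz and 26 on 5 GHz; passive dwell is a
	 * fixed 120 (2.4 GHz) or 110 (5 GHz). */
	printf("%u %u\n", active_dwell_2ghz(2), active_dwell_5ghz(2));
	return 0;
}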
+
+static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
+ struct cfg80211_scan_request *req)
+{
+ u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
+ u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
+ req->n_ssids);
+ struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
+ (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
+ int i;
+ __le32 chan_type_value;
+
+ if (req->n_ssids > 0)
+ chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1);
+ else
+ chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
+
+ for (i = 0; i < cmd->channel_count; i++) {
+ chan->channel = cpu_to_le16(req->channels[i]->hw_value);
+ if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ chan->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ chan->type = chan_type_value;
+ chan->active_dwell = cpu_to_le16(active_dwell);
+ chan->passive_dwell = cpu_to_le16(passive_dwell);
+ chan->iteration_count = cpu_to_le16(1);
+ chan++;
+ }
+}
+
+/*
+ * Fill in probe request with the following parameters:
+ * TA is our vif HW address, which mac80211 ensures we have.
+ * The packet is broadcast, so DA and BSSID are the broadcast address.
+ * The probe request IEs are made of two parts: first comes the most
+ * prioritized SSID if a directed scan is requested, then whatever extra
+ * information was given to us as the scan request IEs.
+ */
+static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
+ int n_ssids, const u8 *ssid, int ssid_len,
+ const u8 *ie, int ie_len,
+ int left)
+{
+ int len = 0;
+ u8 *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= 24;
+ if (left < 0)
+ return 0;
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ eth_broadcast_addr(frame->da);
+ memcpy(frame->sa, ta, ETH_ALEN);
+ eth_broadcast_addr(frame->bssid);
+ frame->seq_ctrl = 0;
+
+ len += 24;
+
+ /* for passive scans, no need to fill anything */
+ if (n_ssids == 0)
+ return (u16)len;
+
+ /* points to the payload of the request */
+ pos = &frame->u.probe_req.variable[0];
+
+ /* fill in our SSID IE */
+ left -= ssid_len + 2;
+ if (left < 0)
+ return 0;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = ssid_len;
+ if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
+ memcpy(pos, ssid, ssid_len);
+ pos += ssid_len;
+ }
+
+ len += ssid_len + 2;
+
+ if (WARN_ON(left < ie_len))
+ return len;
+
+ if (ie && ie_len) {
+ memcpy(pos, ie, ie_len);
+ len += ie_len;
+ }
+
+ return (u16)len;
+}
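
iwl_mvm_fill_probe_req() accounts for a 24-byte management header, the SSID IE, and the extra scan IEs, bailing out when the remaining buffer space runs out. A simplified sketch of that length bookkeeping (the real function only warns, rather than failing, when the extra IEs do not fit):

#include <stdio.h>

/* Simplified length bookkeeping of iwl_mvm_fill_probe_req(): management
 * header, then the SSID IE, then the extra scan IEs, bounded by 'left'. */
static int probe_req_len(int n_ssids, int ssid_len, int ie_len, int left)
{
	int len = 24;			/* management header */

	if (left < len)
		return 0;
	if (n_ssids == 0)
		return len;		/* passive scan: header only */
	len += 2 + ssid_len;		/* SSID IE header + payload */
	len += ie_len;			/* extra scan request IEs */
	return len <= left ? len : 0;
}

int main(void)
{
	/* one 8-byte SSID plus 40 bytes of extra IEs -> 74 bytes */
	printf("%d\n", probe_req_len(1, 8, 40, 200));
	return 0;
}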
+
+int iwl_mvm_scan_request(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = SCAN_REQUEST_CMD,
+ .len = { 0, },
+ .data = { mvm->scan_cmd, },
+ .flags = CMD_SYNC,
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_scan_cmd *cmd = mvm->scan_cmd;
+ int ret;
+ u32 status;
+ int ssid_len = 0;
+ u8 *ssid = NULL;
+
+ lockdep_assert_held(&mvm->mutex);
+ BUG_ON(mvm->scan_cmd == NULL);
+
+ IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
+ mvm->scan_status = IWL_MVM_SCAN_OS;
+ memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
+ mvm->fw->ucode_capa.max_probe_length +
+ (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
+
+ cmd->channel_count = (u8)req->n_channels;
+ cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
+ cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
+ cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
+ cmd->max_out_time = iwl_mvm_scan_max_out_time(vif);
+ cmd->suspend_time = iwl_mvm_scan_suspend_time(vif);
+ cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
+ cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+ MAC_FILTER_IN_BEACON);
+ cmd->type = SCAN_TYPE_FORCED;
+ cmd->repeats = cpu_to_le32(1);
+
+ /*
+ * If the user asked for passive scan, don't change to active scan if
+ * you see any activity on the channel - remain passive.
+ */
+ if (req->n_ssids > 0) {
+ cmd->passive2active = cpu_to_le16(1);
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
+ } else {
+ cmd->passive2active = 0;
+ }
+
+ iwl_mvm_scan_fill_ssids(cmd, req);
+
+ cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+ cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
+ cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ cmd->tx_cmd.rate_n_flags =
+ iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band,
+ req->no_cck);
+
+ cmd->tx_cmd.len =
+ cpu_to_le16(iwl_mvm_fill_probe_req(
+ (struct ieee80211_mgmt *)cmd->data,
+ vif->addr,
+ req->n_ssids, ssid, ssid_len,
+ req->ie, req->ie_len,
+ mvm->fw->ucode_capa.max_probe_length));
+
+ iwl_mvm_scan_fill_channels(cmd, req);
+
+ cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
+ le16_to_cpu(cmd->tx_cmd.len) +
+ (cmd->channel_count * sizeof(struct iwl_scan_channel)));
+ hcmd.len[0] = le16_to_cpu(cmd->len);
+
+ status = SCAN_RESPONSE_OK;
+ ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
+ if (!ret && status == SCAN_RESPONSE_OK) {
+ IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+ } else {
+ /*
+ * If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n",
+ status, ret);
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ret = -EIO;
+ }
+ return ret;
+}
+
+int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_cmd_response *resp = (void *)pkt->data;
+
+ IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n",
+ le32_to_cpu(resp->status));
+ return 0;
+}
+
+int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scan_complete_notif *notif = (void *)pkt->data;
+
+ IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
+ notif->status, notif->scanned_channels);
+
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
+
+ return 0;
+}
+
+static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_scan_complete_notif *notif;
+ u32 *resp;
+
+ switch (pkt->hdr.cmd) {
+ case SCAN_ABORT_CMD:
+ resp = (void *)pkt->data;
+ if (*resp == CAN_ABORT_STATUS) {
+ IWL_DEBUG_SCAN(mvm,
+ "Scan can be aborted, wait until completion\n");
+ return false;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
+ *resp);
+ return true;
+
+ case SCAN_COMPLETE_NOTIFICATION:
+ notif = (void *)pkt->data;
+ IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n",
+ notif->status);
+ return true;
+
+ default:
+ WARN_ON(1);
+ return false;
+	}
+}
+
+void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
+{
+ struct iwl_notification_wait wait_scan_abort;
+ static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
+ SCAN_COMPLETE_NOTIFICATION };
+ int ret;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
+ scan_abort_notif,
+ ARRAY_SIZE(scan_abort_notif),
+ iwl_mvm_scan_abort_notif, NULL);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
+ if (ret) {
+ IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
+ goto out_remove_notif;
+ }
+
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, 1 * HZ);
+ if (ret)
+ IWL_ERR(mvm, "%s - failed on timeout\n", __func__);
+
+ return;
+
+out_remove_notif:
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
new file mode 100644
index 00000000000..a1eb692d7fa
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -0,0 +1,1235 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "mvm.h"
+#include "sta.h"
+
+static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
+{
+ int sta_id;
+
+ WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
+ for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++)
+ if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex)))
+ return sta_id;
+ return IWL_MVM_STATION_COUNT;
+}
+
+/* send station add/update command to firmware */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ bool update)
+{
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_add_sta_cmd add_sta_cmd;
+ int ret;
+ u32 status;
+ u32 agg_size = 0, mpdu_dens = 0;
+
+ memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
+
+ add_sta_cmd.sta_id = mvm_sta->sta_id;
+ add_sta_cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ if (!update) {
+ add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+ memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+ }
+ add_sta_cmd.add_modify = update ? 1 : 0;
+
+ /* STA_FLG_FAT_EN_MSK ? */
+ /* STA_FLG_MIMO_EN_MSK ? */
+
+ if (sta->ht_cap.ht_supported) {
+ add_sta_cmd.station_flags_msk |=
+ cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
+ STA_FLG_AGG_MPDU_DENS_MSK);
+
+ mpdu_dens = sta->ht_cap.ampdu_density;
+ }
+
+ if (sta->vht_cap.vht_supported) {
+ agg_size = sta->vht_cap.cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ agg_size >>=
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ } else if (sta->ht_cap.ht_supported) {
+ agg_size = sta->ht_cap.ampdu_factor;
+ }
+
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+ &add_sta_cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "ADD_STA failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ int i, ret, sta_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ sta_id = iwl_mvm_find_free_sta_id(mvm);
+ else
+ sta_id = mvm_sta->sta_id;
+
+ if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
+ return -ENOSPC;
+
+ spin_lock_init(&mvm_sta->lock);
+
+ mvm_sta->sta_id = sta_id;
+ mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color);
+ mvm_sta->vif = vif;
+ mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+ /* HW restart, don't assume the memory has been zeroed */
+ atomic_set(&mvm_sta->pending_frames, 0);
+ mvm_sta->tid_disable_agg = 0;
+ mvm_sta->tfd_queue_msk = 0;
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
+ mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
+
+ if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+ mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue);
+
+ /* for HW restart - need to reset the seq_number etc... */
+ memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
+
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+ if (ret)
+ return ret;
+
+ /* The first station added is the AP, the others are TDLS STAs */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+ mvmvif->ap_sta_id = sta_id;
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+
+ return 0;
+}
+
+int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+}
+
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool drain)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
+ cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
+ mvmsta->sta_id);
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
+ mvmsta->sta_id);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Remove a station from the FW table. Before sending the command to remove
+ * the station validate that the station is indeed known to the driver (sanity
+ * only).
+ */
+static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
+ .sta_id = sta_id,
+ };
+ int ret;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* Note: internal stations are marked as error values */
+ if (!sta) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, CMD_SYNC,
+ sizeof(rm_sta_cmd), &rm_sta_cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+void iwl_mvm_sta_drained_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
+ u8 sta_id;
+
+ /*
+ * The mutex is needed because of the SYNC cmd, but not only: if the
+ * work would run concurrently with iwl_mvm_rm_sta, it would run before
+ * iwl_mvm_rm_sta sets the station as busy, and exit. Then
+ * iwl_mvm_rm_sta would set the station as busy, and nobody would clean
+ * it up later.
+ */
+ mutex_lock(&mvm->mutex);
+
+ for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
+ int ret;
+ struct ieee80211_sta *sta =
+ rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* This station is in use */
+ if (!IS_ERR(sta))
+ continue;
+
+ if (PTR_ERR(sta) == -EINVAL) {
+ IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
+ sta_id);
+ continue;
+ }
+
+ if (!sta) {
+ IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
+ sta_id);
+ continue;
+ }
+
+ WARN_ON(PTR_ERR(sta) != -EBUSY);
+ /* This station was removed and we waited until it got drained,
+ * we can now proceed and remove it.
+ */
+ ret = iwl_mvm_rm_sta_common(mvm, sta_id);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Couldn't remove sta %d after it was drained\n",
+ sta_id);
+ continue;
+ }
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+ clear_bit(sta_id, mvm->sta_drained);
+ }
+
+ mutex_unlock(&mvm->mutex);
+}
+
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id == mvm_sta->sta_id) {
+ /*
+ * Put a non-NULL value here since the fw station isn't removed
+ * yet. It will be removed once the MAC is set as unassociated.
+ */
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+ ERR_PTR(-EINVAL));
+
+ /* flush its queues here since we are freeing mvm_sta */
+ ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+
+ /* if we are associated - we can't remove the AP STA now */
+ if (vif->bss_conf.assoc)
+ return ret;
+
+ /* unassoc - go ahead - remove the AP STA now */
+ mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ }
+
+ /*
+ * If there are still frames pending on the AC queues for this
+ * station, we need to wait until they are all drained.
+ */
+ if (atomic_read(&mvm_sta->pending_frames)) {
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+ ERR_PTR(-EBUSY));
+ } else {
+ ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+ }
+
+ return ret;
+}
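
iwl_mvm_rm_sta above keeps three states in a single fw_id_to_mac_id slot: a real station pointer, NULL for a free entry, and ERR_PTR(-EBUSY) / ERR_PTR(-EINVAL) markers. A hypothetical userspace imitation of the kernel's ERR_PTR() encoding, just to show how one pointer can carry all of these:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace imitations of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() helpers. */
static inline void *err_ptr(long err)     { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;	/* top 4095 values = errors */
}

int main(void)
{
	void *slot[3] = {
		NULL,			/* free sta_id                      */
		err_ptr(-EBUSY),	/* station removed, still draining  */
		err_ptr(-EINVAL),	/* internal station (aux/broadcast) */
	};

	for (int i = 0; i < 3; i++) {
		if (!slot[i])
			printf("slot %d: free\n", i);
		else if (is_err(slot[i]))
			printf("slot %d: marker %ld\n", i, ptr_err(slot[i]));
		else
			printf("slot %d: real station\n", i);
	}
	return 0;
}
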
+
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 sta_id)
+{
+ int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+ return ret;
+}
+
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
+ u32 qmask)
+{
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ sta->sta_id = iwl_mvm_find_free_sta_id(mvm);
+ if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
+ return -ENOSPC;
+ }
+
+ sta->tfd_queue_msk = qmask;
+
+ /* put a non-NULL value so iterating over the stations won't stop */
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
+ return 0;
+}
+
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
+{
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+ memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
+ sta->sta_id = IWL_MVM_STATION_COUNT;
+}
+
+static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ const u8 *addr,
+ u16 mac_id, u16 color)
+{
+ struct iwl_mvm_add_sta_cmd cmd;
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd));
+ cmd.sta_id = sta->sta_id;
+ cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
+ color));
+
+ cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
+
+ if (addr)
+ memcpy(cmd.addr, addr, ETH_ALEN);
+
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_INFO(mvm, "Internal station added.\n");
+ return 0;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
+ status);
+ break;
+ }
+ return ret;
+}
+
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Add the aux station, but without any queues */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
+ MAC_INDEX_AUX, 0);
+
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+ return ret;
+}
+
+/*
+ * Send the add station command for the vif's broadcast station.
+ * Assumes that the station was already allocated.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ * @bsta: the broadcast station to add.
+ */
+int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *bsta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
+ return -ENOSPC;
+
+ return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+ mvmvif->id, mvmvif->color);
+}
+
+/* Send the FW a request to remove the station from its internal data
+ * structures, but DO NOT remove the entry from the local data structures. */
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *bsta)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+ return ret;
+}
+
+/* Allocate a new station entry for the broadcast station of the given vif,
+ * and send it to the FW.
+ * Note that each P2P mac should have its own broadcast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ * @bsta: the broadcast station to add. */
+int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *bsta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ u32 qmask;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
+ ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+ mvmvif->id, mvmvif->color);
+
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, bsta);
+ return ret;
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+ if (ret)
+ return ret;
+
+ iwl_mvm_dealloc_int_sta(mvm, bsta);
+ return ret;
+}
+
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u16 ssn, bool start)
+{
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.sta_id = mvm_sta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.add_immediate_ba_tid = (u8) tid;
+ cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
+ STA_MODIFY_REMOVE_BA_TID;
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+ break;
+ case ADD_STA_IMMEDIATE_BA_FAILURE:
+ IWL_WARN(mvm, "RX BA Session refused by fw\n");
+ ret = -ENOSPC;
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ break;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u8 queue, bool start)
+{
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (start) {
+ mvm_sta->tfd_queue_msk |= BIT(queue);
+ mvm_sta->tid_disable_agg &= ~BIT(tid);
+ } else {
+ mvm_sta->tfd_queue_msk &= ~BIT(queue);
+ mvm_sta->tid_disable_agg |= BIT(tid);
+ }
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.sta_id = mvm_sta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ break;
+ }
+
+ return ret;
+}
+
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO,
+};
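
The tid_to_ac table above follows the usual 802.11 UP-to-AC mapping (TIDs 0 and 3 are best effort, 1-2 background, 4-5 video, 6-7 voice). A small, hypothetical standalone lookup using the same values, with the mac80211 AC ordering assumed to be VO, VI, BE, BK:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };	/* assumed mac80211 ordering */

static const unsigned char tid_to_ac[] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

static const char *const ac_name[] = { "VO", "VI", "BE", "BK" };

int main(void)
{
	for (unsigned int tid = 0; tid < 8; tid++)
		printf("TID %u -> AC_%s\n", tid, ac_name[tid_to_ac[tid]]);
	return 0;
}
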
+
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data;
+ int txq_id;
+
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+ IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
+ mvmsta->tid_data[tid].state);
+ return -ENXIO;
+ }
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (txq_id = IWL_MVM_FIRST_AGG_QUEUE;
+ txq_id <= IWL_MVM_LAST_AGG_QUEUE; txq_id++)
+ if (mvm->queue_to_mac80211[txq_id] ==
+ IWL_INVALID_MAC80211_QUEUE)
+ break;
+
+ if (txq_id > IWL_MVM_LAST_AGG_QUEUE) {
+ IWL_ERR(mvm, "Failed to allocate agg queue\n");
+ return -EIO;
+ }
+
+ /* the new tx queue is still connected to the same mac80211 queue */
+ mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]];
+
+ spin_lock_bh(&mvmsta->lock);
+ tid_data = &mvmsta->tid_data[tid];
+ tid_data->ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->txq_id = txq_id;
+ *ssn = tid_data->ssn;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
+ mvmsta->sta_id, tid, txq_id, tid_data->ssn,
+ tid_data->next_reclaimed);
+
+ if (tid_data->ssn == tid_data->next_reclaimed) {
+ tid_data->state = IWL_AGG_STARTING;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+
+ return 0;
+}
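
The ssn handed back to mac80211 above comes from SEQ_TO_SN(tid_data->seq_number); the macro is expected to mask away the 4-bit fragment field of the 802.11 sequence-control word and shift the 12-bit sequence number down. A hypothetical standalone check of that bit layout:

#include <stdint.h>
#include <stdio.h>

/* 802.11 sequence control: bits 0-3 fragment number, bits 4-15 sequence number. */
#define SCTL_SEQ_MASK	0xFFF0
#define SEQ_TO_SN(seq)	(((seq) & SCTL_SEQ_MASK) >> 4)

int main(void)
{
	uint16_t seq_ctrl = (2047 << 4) | 3;	/* sequence number 2047, fragment 3 */

	printf("ssn = %u\n", SEQ_TO_SN(seq_ctrl));	/* prints 2047 */
	return 0;
}
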
+
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ int queue, fifo, ret;
+ u16 ssn;
+
+ buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
+
+ spin_lock_bh(&mvmsta->lock);
+ ssn = tid_data->ssn;
+ queue = tid_data->txq_id;
+ tid_data->state = IWL_AGG_ON;
+ tid_data->ssn = 0xffff;
+ spin_unlock_bh(&mvmsta->lock);
+
+ fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]];
+
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ return -EIO;
+
+ iwl_trans_txq_enable(mvm->trans, queue, fifo, mvmsta->sta_id, tid,
+ buf_size, ssn);
+
+ /*
+ * Even though in theory the peer could have different
+ * aggregation reorder buffer sizes for different sessions,
+ * our ucode doesn't allow for that and has a global limit
+ * for each station. Therefore, use the minimum of all the
+ * aggregation sessions and our default value.
+ */
+ mvmsta->max_agg_bufsize =
+ min(mvmsta->max_agg_bufsize, buf_size);
+ mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+
+ if (mvm->cfg->ht_params->use_rts_for_aggregation) {
+ /*
+ * switch to RTS/CTS if it is the preferred protection
+ * method for HT traffic
+ */
+ mvmsta->lq_sta.lq.flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ /*
+ * TODO: remove the TLC_RTS flag when we tear down the last
+ * AGG session (agg_tids_count in DVM)
+ */
+ }
+
+ IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ sta->addr, tid);
+
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false);
+}
+
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ u16 txq_id;
+ int err;
+
+ spin_lock_bh(&mvmsta->lock);
+
+ txq_id = tid_data->txq_id;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
+ mvmsta->sta_id, tid, txq_id, tid_data->state);
+
+ switch (tid_data->state) {
+ case IWL_AGG_ON:
+ tid_data->ssn = SEQ_TO_SN(tid_data->seq_number);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "ssn = %d, next_recl = %d\n",
+ tid_data->ssn, tid_data->next_reclaimed);
+
+ /* There are still packets for this RA / TID in the HW */
+ if (tid_data->ssn != tid_data->next_reclaimed) {
+ tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
+ err = 0;
+ break;
+ }
+
+ tid_data->ssn = 0xffff;
+ iwl_trans_txq_disable(mvm->trans, txq_id);
+ /* fall through */
+ case IWL_AGG_STARTING:
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * The agg session has been stopped before it was set up. This
+ * can happen when the AddBA timer times out for example.
+ */
+
+ /* No barriers since we are under mutex */
+ lockdep_assert_held(&mvm->mutex);
+ mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ tid_data->state = IWL_AGG_OFF;
+ err = 0;
+ break;
+ default:
+ IWL_ERR(mvm,
+ "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+ mvmsta->sta_id, tid, tid_data->state);
+ IWL_ERR(mvm,
+ "\ttid_data->txq_id = %d\n", tid_data->txq_id);
+ err = -EINVAL;
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+
+ return err;
+}
+
+static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
+{
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+
+ if (i == STA_KEY_MAX_NUM)
+ return STA_KEY_IDX_INVALID;
+
+ __set_bit(i, mvm->fw_key_table);
+
+ return i;
+}
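
iwl_mvm_set_fw_key_idx hands out hardware key offsets first-fit from the fw_key_table bitmap. A hypothetical userspace sketch of the same allocator (the table size of 16 is an assumption made only for this illustration):

#include <stdio.h>

#define STA_KEY_MAX_NUM		16	/* assumed size, illustration only */
#define KEY_IDX_INVALID		0xff

static unsigned int key_table;	/* bit i set => hw key offset i is in use */

static int alloc_key_idx(void)
{
	for (int i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (!(key_table & (1u << i))) {
			key_table |= 1u << i;	/* claim the slot */
			return i;
		}
	}
	return KEY_IDX_INVALID;
}

static void free_key_idx(int i)
{
	key_table &= ~(1u << i);
}

int main(void)
{
	int a = alloc_key_idx();	/* 0 */
	int b = alloc_key_idx();	/* 1 */

	free_key_idx(a);
	printf("%d %d %d\n", a, b, alloc_key_idx());	/* prints: 0 1 0 */
	return 0;
}
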
+
+static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+
+ if (sta) {
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+ return mvm_sta->sta_id;
+ }
+
+ /*
+ * The device expects GTKs for station interfaces to be
+ * installed as GTKs for the AP station. If we have no
+ * station ID, then use AP's station ID.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
+ return mvmvif->ap_sta_id;
+
+ return IWL_INVALID_STATION;
+}
+
+static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
+ u32 cmd_flags)
+{
+ __le16 key_flags;
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret, status;
+ u16 keyidx;
+ int i;
+
+ keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK;
+ key_flags = cpu_to_le16(keyidx);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
+ cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+ for (i = 0; i < 5; i++)
+ cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+ memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
+ memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.key.key_offset = keyconf->hw_key_idx;
+ cmd.key.key_flags = key_flags;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_KEY;
+ cmd.sta_id = sta_id;
+
+ status = ADD_STA_SUCCESS;
+ if (cmd_flags == CMD_SYNC)
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+ else
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ sizeof(cmd), &cmd);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id, bool remove_key)
+{
+ struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
+
+ /* verify the key details match the required command's expectations */
+ if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
+ (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
+ (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
+ return -EINVAL;
+
+ igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
+ igtk_cmd.sta_id = cpu_to_le32(sta_id);
+
+ if (remove_key) {
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
+ } else {
+ struct ieee80211_key_seq seq;
+ const u8 *pn;
+
+ memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
+ ieee80211_aes_cmac_calculate_k1_k2(keyconf,
+ igtk_cmd.K1, igtk_cmd.K2);
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ pn = seq.aes_cmac.pn;
+ igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
+ ((u64) pn[4] << 8) |
+ ((u64) pn[3] << 16) |
+ ((u64) pn[2] << 24) |
+ ((u64) pn[1] << 32) |
+ ((u64) pn[0] << 40));
+ }
+
+ IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
+ remove_key ? "removing" : "installing",
+ igtk_cmd.sta_id);
+
+ return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, CMD_SYNC,
+ sizeof(igtk_cmd), &igtk_cmd);
+}
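
The receive_seq_cnt computation above packs the 6-byte BIP packet number, stored most-significant byte first by mac80211, into a little-endian 48-bit counter for the firmware, so pn[5] ends up in the low byte. A hypothetical standalone check of that byte shuffle:

#include <stdint.h>
#include <stdio.h>

/* Build the 48-bit receive counter the way the driver does: pn[] is big-endian. */
static uint64_t pack_pn(const uint8_t pn[6])
{
	return ((uint64_t)pn[5] << 0)  |
	       ((uint64_t)pn[4] << 8)  |
	       ((uint64_t)pn[3] << 16) |
	       ((uint64_t)pn[2] << 24) |
	       ((uint64_t)pn[1] << 32) |
	       ((uint64_t)pn[0] << 40);
}

int main(void)
{
	const uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };	/* PN = 0x0102 */

	printf("receive_seq_cnt = 0x%llx\n", (unsigned long long)pack_pn(pn));
	return 0;
}
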
+
+
+static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+
+ if (sta)
+ return sta->addr;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ u8 sta_id = mvmvif->ap_sta_id;
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ return sta->addr;
+ }
+
+
+ return NULL;
+}
+
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ bool have_key_offset)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ int ret;
+ u8 *addr, sta_id;
+ struct ieee80211_key_seq seq;
+ u16 p1k[5];
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Get the station id from the mvm local station table */
+ sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(mvm, "Failed to find station id\n");
+ return -EINVAL;
+ }
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
+ goto end;
+ }
+
+ /*
+ * It is possible that the 'sta' parameter is NULL, and thus
+ * there is a need to retrieve the sta from the local station table.
+ */
+ if (!sta) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
+ }
+
+ mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+ if (WARN_ON_ONCE(mvm_sta->vif != vif))
+ return -EINVAL;
+
+ if (!have_key_offset) {
+ /*
+ * The D3 firmware hardcodes the PTK offset to 0, so we have to
+ * configure it there. As a result, this workaround exists to
+ * let the caller set the key offset (hw_key_idx), see d3.c.
+ */
+ keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm);
+ if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+ return -ENOSPC;
+ }
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+ /* get phase 1 key from mac80211 */
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+ seq.tkip.iv32, p1k, CMD_SYNC);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+ 0, NULL, CMD_SYNC);
+ break;
+ default:
+ IWL_ERR(mvm, "Unknown cipher %x\n", keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+
+end:
+ IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+ sta->addr, ret);
+ return ret;
+}
+
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ __le16 key_flags;
+ int ret, status;
+ u8 sta_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Get the station id from the mvm local station table */
+ sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+
+ IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
+ keyconf->keyidx, sta_id);
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
+
+ ret = __test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+ if (!ret) {
+ IWL_ERR(mvm, "offset %d not used in fw key table.\n",
+ keyconf->hw_key_idx);
+ return -ENOENT;
+ }
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
+ return 0;
+ }
+
+ /*
+ * It is possible that the 'sta' parameter is NULL, and thus
+ * there is a need to retrieve the sta from the local station table,
+ * for example when a GTK is removed (where the sta_id will then be
+ * the AP ID, and no station was passed by mac80211.)
+ */
+ if (!sta) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!sta) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
+ }
+
+ mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+ if (WARN_ON_ONCE(mvm_sta->vif != vif))
+ return -EINVAL;
+
+ key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
+ key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
+
+ if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.key.key_flags = key_flags;
+ cmd.key.key_offset = keyconf->hw_key_idx;
+ cmd.sta_id = sta_id;
+
+ cmd.modify_mask = STA_MODIFY_KEY;
+ cmd.add_modify = STA_MODE_MODIFY;
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+
+ if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+ return;
+
+ rcu_read_lock();
+
+ if (!sta) {
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (WARN_ON(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+
+ mvm_sta = (void *)sta->drv_priv;
+ iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+ iv32, phase1key, CMD_ASYNC);
+ rcu_read_unlock();
+}
+
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, int sta_id)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = sta_id,
+ .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+ .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE),
+ };
+ int ret;
+
+ /*
+ * Same modify mask for sleep_tx_count and sleep_state_flags but this
+ * should be fine since if we set the STA as "awake", then
+ * sleep_tx_count is not relevant.
+ */
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, int sta_id,
+ enum ieee80211_frame_release_type reason,
+ u16 cnt)
+{
+ u16 sleep_state_flags =
+ (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
+ STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = sta_id,
+ .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+ .sleep_tx_count = cpu_to_le16(cnt),
+ /*
+ * Same modify mask for sleep_tx_count and sleep_state_flags so
+ * we must set the sleep_state_flags too.
+ */
+ .sleep_state_flags = cpu_to_le16(sleep_state_flags),
+ };
+ int ret;
+
+ /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
new file mode 100644
index 00000000000..bdd7c5ed822
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -0,0 +1,372 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __sta_h__
+#define __sta_h__
+
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+#include <linux/wait.h>
+
+#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
+#include "fw-api.h" /* IWL_MVM_STATION_COUNT */
+#include "rs.h"
+
+struct iwl_mvm;
+
+/**
+ * DOC: station table - introduction
+ *
+ * The station table is a list of data structures that represent the stations.
+ * In STA/P2P client mode, the driver will hold one station for the AP / GO.
+ * In GO/AP mode, the driver will have as many stations as associated clients.
+ * All these stations are reflected in the fw's station table. The driver
+ * keeps the fw's station table up to date with the ADD_STA command. Stations
+ * can be removed by the REMOVE_STA command.
+ *
+ * All the data related to a station is held in the %iwl_mvm_sta structure,
+ * which is embedded in mac80211's %ieee80211_sta (in the drv_priv area).
+ * This data includes the index of the station in the fw, per tid information
+ * (sequence numbers, Block-ack state machine, etc...). The stations are
+ * created and deleted by the %sta_state callback from %ieee80211_ops.
+ *
+ * The driver holds a map: %fw_id_to_mac_id that allows to fetch a
+ * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw
+ * station index. That way, the driver is able to get the tid-related data in
+ * O(1) in time-sensitive paths (Tx / Tx response / BA notification). These
+ * paths are triggered by the fw, and the driver needs to get a pointer to the
+ * %ieee80211_sta structure. This map helps to get that pointer quickly.
+ */
+
+/**
+ * DOC: station table - locking
+ *
+ * As stated before, the station is created / deleted by mac80211's %sta_state
+ * callback from %ieee80211_ops which can sleep. The next paragraph explains
+ * the locking of a single stations, the next ones relates to the station
+ * table.
+ *
+ * The station holds the sequence number per tid. So this data needs to be
+ * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack
+ * information (the state machine / and the logic that checks if the queues
+ * were drained), so it also needs to be accessible from the Tx response flow.
+ * In short, the station needs to be accessed from sleepable context as well as
+ * from tasklets, so the station itself needs a spinlock.
+ *
+ * The writers of %fw_id_to_mac_id map are serialized by the global mutex of
+ * the mvm op_mode. This is possible since %sta_state can sleep.
+ * The pointers in this map are RCU protected, hence we won't replace the
+ * station while we have Tx / Tx response / BA notification running.
+ *
+ * If a station is deleted while it still has packets in its A-MPDU queues,
+ * then the reclaim flow will notice that there is no station in the map for
+ * sta_id and it will dump the responses.
+ */
+
+/**
+ * DOC: station table - internal stations
+ *
+ * The FW needs a few internal stations that are not reflected in
+ * mac80211, such as broadcast station in AP / GO mode, or AUX sta for
+ * scanning and P2P device (during the GO negotiation).
+ * For this kind of station we have the %iwl_mvm_int_sta struct, which holds
+ * the data relevant for them from both %iwl_mvm_sta and %ieee80211_sta.
+ * Usually the data for these stations is static, so no locking is required,
+ * and there is no TID data, since it is not needed either.
+ * One thing to note is that these stations have an ID in the fw, but not
+ * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id,
+ * we fill ERR_PTR(-EINVAL) into this mapping, and any code dereferencing
+ * pointers from this mapping needs to check that the value is neither an
+ * error nor NULL.
+ *
+ * Currently there is only one auxiliary station for scanning, initialized
+ * on init.
+ */
+
+/**
+ * DOC: station table - AP Station in STA mode
+ *
+ * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
+ * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
+ * %fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
+ * the AP station from the fw before setting the MAC context as unassociated.
+ * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
+ * removed by mac80211, but the station won't be removed in the fw until the
+ * VIF is set as unassociated. Then, %ap_sta_id will be invalidated.
+ */
+
+/**
+ * DOC: station table - Drain vs. Flush
+ *
+ * Flush means that all the frames in the SCD queue are dumped regardless of the
+ * station to which they were sent. We do that when we disassociate and before
+ * we remove the STA of the AP. The flush can be done synchronously against the
+ * fw.
+ * Drain means that the fw will drop all the frames sent to a specific station.
+ * This is useful when a client (if we are IBSS / GO or AP) disassociates. In
+ * that case, we need to drain all the frames for that client from the AC queues
+ * that are shared with the other clients. Only then, we can remove the STA in
+ * the fw. In order to do so, we track the non-AMPDU packets for each station.
+ * If mac80211 removes a STA and if it still has non-AMPDU packets pending in
+ * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all
+ * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped
+ * (we know about it with its Tx response), we remove the station in fw and set
+ * it as %NULL in %fw_id_to_mac_id: this is the purpose of
+ * %iwl_mvm_sta_drained_wk.
+ */
+
+/**
+ * DOC: station table - fw restart
+ *
+ * When the fw asserts, or we have any other issue that requires to reset the
+ * driver, we require mac80211 to reconfigure the driver. Since the private
+ * data of the stations is embedded in mac80211's %ieee80211_sta, that data will
+ * not be zeroed and needs to be reinitialized manually.
+ * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and hints that we
+ * must not allocate a new sta_id but reuse the previous one. This
+ * means that the stations being re-added after the reset will have the same
+ * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id
+ * map, since the stations aren't in the fw any more. Internal stations that
+ * are not added by mac80211 will be re-added in the init flow that is called
+ * after the restart: mac80211 call's %iwl_mvm_mac_start which calls to
+ * %iwl_mvm_up.
+ */
+
+/**
+ * DOC: AP mode - PS
+ *
+ * When a station is asleep, the fw will set it as "asleep". All the
+ * non-aggregation frames to that station will be dropped by the fw
+ * (%TX_STATUS_FAIL_DEST_PS failure code).
+ * AMPDUs are in a separate queue that is stopped by the fw. We just need to
+ * let mac80211 know how many frames we have in these queues so that it can
+ * properly handle trigger frames.
+ * When a trigger frame is received, mac80211 tells the driver to send frames
+ * from the AMPDU queues or the AC queues, depending on which queues are
+ * delivery-enabled and which TIDs have frames to transmit (note that mac80211
+ * has all the knowledge since all the non-agg frames are buffered / filtered,
+ * and the driver tells mac80211 about agg frames). The driver needs to tell
+ * the fw to let frames out even if the station is asleep. This is done by
+ * %iwl_mvm_sta_modify_sleep_tx_count.
+ * When we receive a frame from that station with the PM bit unset, the
+ * driver needs to let the fw know that this station isn't asleep any more.
+ * This is done by %iwl_mvm_sta_modify_ps_wake.
+ *
+ * TODO - EOSP handling
+ */
+
+/**
+ * enum iwl_mvm_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ * HW queue to be empty of packets for this RA / TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ * HW queue to be empty of packets for this RA / TID.
+ */
+enum iwl_mvm_agg_state {
+ IWL_AGG_OFF = 0,
+ IWL_AGG_STARTING,
+ IWL_AGG_ON,
+ IWL_EMPTYING_HW_QUEUE_ADDBA,
+ IWL_EMPTYING_HW_QUEUE_DELBA,
+};
+
+/**
+ * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ * This is basically (last acked packet++).
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ * we are ready to finish the Tx AGG stop / start flow.
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ */
+struct iwl_mvm_tid_data {
+ u16 seq_number;
+ u16 next_reclaimed;
+ /* The rest is Tx AGG related */
+ u32 rate_n_flags;
+ enum iwl_mvm_agg_state state;
+ u16 txq_id;
+ u16 ssn;
+ bool wait_for_ba;
+};
+
+/**
+ * struct iwl_mvm_sta - representation of a station in the driver
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @tfd_queue_msk: the tfd queues used by the station
+ * @mac_id_n_color: the MAC context this station is linked to
+ * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
+ * tid.
+ * @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @lock: lock to protect the whole struct. Since %tid_data is accessed from
+ * both the Tx path and the Tx response flow, it needs a spinlock.
+ * @pending_frames: number of frames for this STA on the shared Tx queues.
+ * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ */
+struct iwl_mvm_sta {
+ u32 sta_id;
+ u32 tfd_queue_msk;
+ u32 mac_id_n_color;
+ u16 tid_disable_agg;
+ u8 max_agg_bufsize;
+ spinlock_t lock;
+ atomic_t pending_frames;
+ struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
+ struct iwl_lq_sta lq_sta;
+ struct ieee80211_vif *vif;
+
+#ifdef CONFIG_PM_SLEEP
+ u16 last_seq_ctl;
+#endif
+};
+
+/**
+ * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
+ * broadcast)
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @tfd_queue_msk: the tfd queues used by the station
+ */
+struct iwl_mvm_int_sta {
+ u32 sta_id;
+ u32 tfd_queue_msk;
+};
+
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ bool update);
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 sta_id);
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ bool have_key_offset);
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf);
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+
+/* AMPDU */
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u16 ssn, bool start);
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size);
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
+ u32 qmask);
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta);
+int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *bsta);
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *bsta);
+int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *bsta);
+int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta);
+void iwl_mvm_sta_drained_wk(struct work_struct *wk);
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, int sta_id);
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, int sta_id,
+ enum ieee80211_frame_release_type reason,
+ u16 cnt);
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool drain);
+
+#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
new file mode 100644
index 00000000000..c09b71f2375
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -0,0 +1,511 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/jiffies.h>
+#include <net/mac80211.h>
+
+#include "iwl-notif-wait.h"
+#include "iwl-trans.h"
+#include "fw-api.h"
+#include "time-event.h"
+#include "mvm.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+/* A TimeUnit is 1024 microseconds */
+#define TU_TO_JIFFIES(_tu) (usecs_to_jiffies((_tu) * 1024))
+#define MSEC_TO_TU(_msec) (((_msec) * 1000) / 1024)
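
Since one TU is 1024 microseconds, MSEC_TO_TU is a plain integer conversion. A hypothetical standalone check of the arithmetic (mirroring the macros, not driver code):

#include <stdio.h>

#define TU_USEC			1024u			/* one time unit in microseconds */
#define MSEC_TO_TU(_msec)	(((_msec) * 1000u) / TU_USEC)
#define TU_TO_USEC(_tu)		((_tu) * TU_USEC)

int main(void)
{
	/* 500 ms of protection time is roughly 488 TUs; 488 TUs are ~499712 us. */
	printf("500 ms -> %u TU\n", MSEC_TO_TU(500u));
	printf("488 TU -> %u us\n", TU_TO_USEC(488u));
	return 0;
}
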
+
+/* For ROC use a TE type which has priority high enough to be scheduled when
+ * there is a concurrent BSS or GO/AP. Currently, use a TE type that has
+ * priority similar to the TE priority used for action scans by the FW.
+ * TODO: This needs to be changed, based on the reason for the ROC, i.e., use
+ * TE_P2P_DEVICE_DISCOVERABLE for remain on channel without mgmt skb, and use
+ * TE_P2P_DEVICE_ACTION_SCAN
+ */
+#define IWL_MVM_ROC_TE_TYPE TE_P2P_DEVICE_ACTION_SCAN
+
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ lockdep_assert_held(&mvm->time_event_lock);
+
+ if (te_data->id == TE_MAX)
+ return;
+
+ list_del(&te_data->list);
+ te_data->running = false;
+ te_data->uid = 0;
+ te_data->id = TE_MAX;
+ te_data->vif = NULL;
+}
+
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+ synchronize_net();
+
+ /*
+ * Flush the offchannel queue -- this is called when the time
+ * event finishes or is cancelled, so that frames queued for it
+ * won't get stuck on the queue and be transmitted in the next
+ * time event.
+ * We have to send the command asynchronously since this cannot
+ * be under the mutex for locking reasons, but that's not an
+ * issue as it will have to complete before the next command is
+ * executed, and a new time event means a new command.
+ */
+ iwl_mvm_flush_tx_path(mvm, BIT(IWL_OFFCHANNEL_QUEUE), false);
+}
+
+static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
+{
+ /*
+ * First, clear the ROC_RUNNING status bit. This will cause the TX
+ * path to drop offchannel transmissions. That would also be done
+ * by mac80211, but it is racy, in particular in the case that the
+ * time event actually completed in the firmware (which is handled
+ * in iwl_mvm_te_handle_notif).
+ */
+ clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+
+ /*
+ * Of course, our status bit is just as racy as mac80211, so in
+ * addition, fire off the work struct which will drop all frames
+ * from the hardware queues that made it through the race. First
+ * it will of course synchronize the TX path to make sure that
+ * any *new* TX will be rejected.
+ */
+ schedule_work(&mvm->roc_done_wk);
+}
+
+/*
+ * Handles a FW notification for an event that is known to the driver.
+ *
+ * @mvm: the mvm component
+ * @te_data: the time event data
+ * @notif: the notification data corresponding the time event data.
+ */
+static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_notif *notif)
+{
+ lockdep_assert_held(&mvm->time_event_lock);
+
+ IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
+ le32_to_cpu(notif->unique_id),
+ le32_to_cpu(notif->action));
+
+ /*
+ * The FW sends the start/end time event notifications even for events
+ * that it fails to schedule. This is indicated in the status field of
+ * the notification. This happens in cases where the scheduler cannot
+ * find a schedule that can handle the event (for example, requesting
+ * P2P Device discoverability while there are other higher priority
+ * events in the system).
+ */
+ WARN_ONCE(!le32_to_cpu(notif->status),
+ "Failed to schedule time event\n");
+
+ if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_END) {
+ IWL_DEBUG_TE(mvm,
+ "TE ended - current time %lu, estimated end %lu\n",
+ jiffies, te_data->end_jiffies);
+
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ iwl_mvm_roc_finished(mvm);
+ }
+
+ /*
+ * By now, we should have finished association
+ * and know the dtim period.
+ */
+ if (te_data->vif->type == NL80211_IFTYPE_STATION &&
+ (!te_data->vif->bss_conf.assoc ||
+ !te_data->vif->bss_conf.dtim_period)) {
+ IWL_ERR(mvm,
+ "No assocation and the time event is over already...\n");
+ ieee80211_connection_loss(te_data->vif);
+ }
+
+ iwl_mvm_te_clear_data(mvm, te_data);
+ } else if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_START) {
+ te_data->running = true;
+ te_data->end_jiffies = jiffies +
+ TU_TO_JIFFIES(te_data->duration);
+
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ ieee80211_ready_on_channel(mvm->hw);
+ }
+ } else {
+ IWL_WARN(mvm, "Got TE with unknown action\n");
+ }
+}
+
+/*
+ * The Rx handler for time event notifications
+ */
+int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_time_event_notif *notif = (void *)pkt->data;
+ struct iwl_mvm_time_event_data *te_data, *tmp;
+
+ IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
+ le32_to_cpu(notif->unique_id),
+ le32_to_cpu(notif->action));
+
+ spin_lock_bh(&mvm->time_event_lock);
+ list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
+ if (le32_to_cpu(notif->unique_id) == te_data->uid)
+ iwl_mvm_te_handle_notif(mvm, te_data, notif);
+ }
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ return 0;
+}
+
+static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_time_event_data *te_data = data;
+ struct iwl_time_event_resp *resp;
+ int resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+ if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+ te_data->uid = le32_to_cpu(resp->unique_id);
+ IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+ te_data->uid);
+ return true;
+}
+
+static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_cmd *te_cmd)
+{
+ static const u8 time_event_response[] = { TIME_EVENT_CMD };
+ struct iwl_notification_wait wait_time_event;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+ if (WARN_ON(te_data->id != TE_MAX)) {
+ spin_unlock_bh(&mvm->time_event_lock);
+ return -EIO;
+ }
+ te_data->vif = vif;
+ te_data->duration = le32_to_cpu(te_cmd->duration);
+ te_data->id = le32_to_cpu(te_cmd->id);
+ list_add_tail(&te_data->list, &mvm->time_event_list);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ /*
+ * Use a notification wait, which really just processes the
+ * command response and doesn't wait for anything, in order
+ * to be able to process the response and get the UID inside
+ * the RX path. Using CMD_WANT_SKB doesn't work because it
+ * stores the buffer and then wakes up this thread, by which
+ * time another notification (that the time event started)
+ * might already be processed unsuccessfully.
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+ time_event_response,
+ ARRAY_SIZE(time_event_response),
+ iwl_mvm_time_event_response, te_data);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+ sizeof(*te_cmd), te_cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
+ iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+ goto out_clear_te;
+ }
+
+ /* No need to wait for anything, so just pass 1 (0 isn't valid) */
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+ /* should never fail */
+ WARN_ON_ONCE(ret);
+
+ if (ret) {
+ out_clear_te:
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+ return ret;
+}
+
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ struct iwl_time_event_cmd time_cmd = {};
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (te_data->running &&
+ time_after(te_data->end_jiffies,
+ jiffies + TU_TO_JIFFIES(min_duration))) {
+ IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ return;
+ }
+
+ if (te_data->running) {
+ IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
+ te_data->uid,
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ /*
+ * We don't have enough time left, so cancel the current TE and
+ * issue a new one. It would of course be better to remove the old
+ * one only once the new one is added, but we don't care if we are
+ * off channel for a bit. All we need to do is not return before we
+ * are actually on the channel again.
+ */
+ iwl_mvm_stop_session_protection(mvm, vif);
+ }
+
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
+
+ time_cmd.apply_time =
+ cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+
+ time_cmd.dep_policy = TE_INDEPENDENT;
+ time_cmd.is_present = cpu_to_le32(1);
+ time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
+ time_cmd.max_delay = cpu_to_le32(500);
+ /* TODO: why do we need to set interval = bi if it is not periodic? */
+ time_cmd.interval = cpu_to_le32(1);
+ time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
+ time_cmd.duration = cpu_to_le32(duration);
+ time_cmd.repeat = cpu_to_le32(1);
+ time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END);
+
+ iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
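+
+/*
+ * Worked numbers, purely illustrative (assuming the usual definition
+ * TU_TO_JIFFIES(tu) = usecs_to_jiffies(tu * 1024)): a min_duration of 400 TU
+ * corresponds to roughly 410 ms worth of jiffies, so the "enough time" check
+ * above only keeps the running TE if it still has more than that left;
+ * otherwise the TE is torn down and re-added with the full requested duration.
+ */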
+
+/*
+ * Explicit request to remove a time event. The removal of a time event needs to
+ * be synchronized with the flow of a time event's end notification, which also
+ * removes the time event from the op mode data structures.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_time_event_cmd time_cmd = {};
+ u32 id, uid;
+ int ret;
+
+ /*
+ * It is possible that by the time we got to this point the time
+ * event was already removed.
+ */
+ spin_lock_bh(&mvm->time_event_lock);
+
+ /* Save time event uid before clearing its data */
+ uid = te_data->uid;
+ id = te_data->id;
+
+ /*
+ * The clear_data function handles time events that were already removed
+ */
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ /*
+ * It is possible that by the time we try to remove it, the time event
+ * has already ended and been removed. In such a case there is no need to
+ * send a removal command.
+ */
+ if (id == TE_MAX) {
+ IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid);
+ return;
+ }
+
+ /* When we remove a TE, the UID is to be set in the id field */
+ time_cmd.id = cpu_to_le32(uid);
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+
+ IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_ASYNC,
+ sizeof(time_cmd), &time_cmd);
+ if (WARN_ON(ret))
+ return;
+}
+
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+
+ lockdep_assert_held(&mvm->mutex);
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+}
+
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int duration)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ struct iwl_time_event_cmd time_cmd = {};
+
+ lockdep_assert_held(&mvm->mutex);
+ if (te_data->running) {
+ IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Flush the done work, just in case it's still pending, so that
+ * the work it does can complete and we can accept new frames.
+ */
+ flush_work(&mvm->roc_done_wk);
+
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE);
+
+ time_cmd.apply_time = cpu_to_le32(0);
+ time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
+ time_cmd.is_present = cpu_to_le32(1);
+
+ time_cmd.interval = cpu_to_le32(1);
+
+ /*
+ * IWL_MVM_ROC_TE_TYPE can have lower priority than other events
+ * that are being scheduled by the driver/fw, and thus it might not be
+ * scheduled. To improve the chances of it being scheduled, allow it to
+ * be fragmented.
+ * In addition, for the same reasons, allow delaying the scheduling of
+ * the time event.
+ */
+ time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20);
+ time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
+ time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
+ time_cmd.repeat = cpu_to_le32(1);
+ time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END);
+
+ return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
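+
+/*
+ * Worked numbers, purely illustrative (assuming the usual definition
+ * MSEC_TO_TU(x) = x * 1000 / 1024, i.e. 1 TU = 1024 usec): a 500 ms ROC
+ * request yields duration = MSEC_TO_TU(500) ~ 488 TU,
+ * max_delay = MSEC_TO_TU(250) ~ 244 TU and max_frags = 488 / 20 ~ 24,
+ * i.e. the scheduler may delay the event by up to half its length and split
+ * it into chunks of roughly 20 TU each if higher priority events compete
+ * for the medium.
+ */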
+
+void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_time_event_data *te_data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /*
+ * Iterate over the list of time events and find the time event that is
+ * associated with a P2P_DEVICE interface.
+ * This assumes that a P2P_DEVICE interface can have only a single time
+ * event at any given time and that this time event corresponds to a ROC
+ * request.
+ */
+ mvmvif = NULL;
+ spin_lock_bh(&mvm->time_event_lock);
+ list_for_each_entry(te_data, &mvm->time_event_list, list) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ break;
+ }
+ }
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (!mvmvif) {
+ IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n");
+ return;
+ }
+
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+
+ iwl_mvm_roc_finished(mvm);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
new file mode 100644
index 00000000000..64fb57a5ab4
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -0,0 +1,214 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __time_event_h__
+#define __time_event_h__
+
+#include "fw-api.h"
+
+#include "mvm.h"
+
+/**
+ * DOC: Time Events - what is it?
+ *
+ * Time Events are a fw feature that allows the driver to control the presence
+ * of the device on the channel. Since the fw supports multiple channels
+ * concurrently, the fw may choose to jump to another channel at any time.
+ * In order to make sure that the fw is on a specific channel at a certain time
+ * and for a certain duration, the driver needs to issue a time event.
+ *
+ * The simplest example is for BSS association. The driver issues a time event,
+ * waits for it to start, and only then tells mac80211 that we can start the
+ * association. This way, we make sure that the association will be done
+ * smoothly and won't be interrupted by channel switch decided within the fw.
+ */
+
+/**
+ * DOC: The flow against the fw
+ *
+ * When the driver needs to make sure we are on a certain channel, at a certain
+ * time and for a certain duration, it sends a Time Event. The flow against the
+ * fw goes like this:
+ * 1) Driver sends a TIME_EVENT_CMD to the fw
+ * 2) Driver gets the response for that command. This response contains the
+ * Unique ID (UID) of the event.
+ * 3) The fw sends notification when the event starts.
+ *
+ * Of course the API provides various options that allow the driver to control
+ * the parameters of the flow:
+ * What is the duration of the event?
+ * What is the start time of the event?
+ * Is there an end-time for the event?
+ * How much can the event be delayed?
+ * Can the event be split?
+ * If so, what is the maximal number of chunks?
+ * etc...
+ */
+
+/**
+ * DOC: Abstraction to the driver
+ *
+ * In order to simplify the use of time events for the rest of the driver,
+ * this component abstracts them and provides the functions needed by the
+ * driver.
+ */
+
+#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
+#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mvm_protect_session - start / extend the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ * @duration: the duration of the session in TU.
+ * @min_duration: will start a new session if the current session will end
+ * in less than min_duration.
+ *
+ * This function can be used to start a session protection, which means that
+ * the fw will stay on the channel for %duration TU. This function
+ * will block (sleep) until the session starts. This function can also be used
+ * to extend a currently running session.
+ * This function is meant to be used for BSS association for example, where we
+ * want to make sure that the fw stays on the channel during the association.
+ */
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration);
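+
+/*
+ * Example (illustrative sketch only, not a prescribed call site): a caller
+ * that is about to associate could, with mvm->mutex held, protect the session
+ * using the constants above and release the protection once the association
+ * has completed:
+ *
+ *	iwl_mvm_protect_session(mvm, vif,
+ *			MSEC_TO_TU(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS),
+ *			MSEC_TO_TU(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS));
+ *	...
+ *	iwl_mvm_stop_session_protection(mvm, vif);
+ */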
+
+/**
+ * iwl_mvm_stop_session_protection - cancel the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship. If the protection is not needed any more it should be
+ * cancelled, because the other bindings wait for the medium during that time.
+ * This function doesn't sleep.
+ */
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
+/*
+ * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
+ */
+int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+/**
+ * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the roc is requested. It is assumed
+ * that the vif type is NL80211_IFTYPE_P2P_DEVICE
+ * @duration: the requested duration in milliseconds for the fw to be on the
+ * channel that is bound to the vif.
+ *
+ * This function can be used to issue a remain on channel session,
+ * which means that the fw will stay on the channel for the requested %duration
+ * milliseconds. The function is async, meaning that it only issues the ROC
+ * request but does not wait for it to start. Once the FW is ready to serve the
+ * ROC request, it will issue a notification to the driver that it is on the
+ * requested channel. Once the FW completes the ROC request it will issue
+ * another notification to the driver.
+ */
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int duration);
+
+/**
+ * iwl_mvm_stop_p2p_roc - stop remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ *
+ * This function can be used to cancel an ongoing ROC session.
+ * The function is async, it will instruct the FW to stop serving the ROC
+ * session, but will not wait for the actual stopping of the session.
+ */
+void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm);
+
+/**
+ * iwl_mvm_remove_time_event - general function to clean up a time event
+ * @mvm: the mvm component
+ * @mvmvif: the vif to which the time event belongs
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * This function can be used to cancel a time event regardless of its type.
+ * It is useful for cleaning up time events running before removing an
+ * interface.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data);
+
+/**
+ * iwl_mvm_te_clear_data - remove time event from list
+ * @mvm: the mvm component
+ * @te_data: the time event data to remove
+ *
+ * This function is mostly internal; it is made available here only
+ * for firmware restart purposes.
+ */
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data);
+
+void iwl_mvm_roc_done_wk(struct work_struct *wk);
+
+#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
new file mode 100644
index 00000000000..6b67ce3f679
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -0,0 +1,916 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+
+#include "iwl-trans.h"
+#include "iwl-eeprom-parse.h"
+#include "mvm.h"
+#include "sta.h"
+
+/*
+ * Sets most of the Tx cmd's fields
+ */
+static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, u8 sta_id)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+ u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
+ u32 len = skb->len + FCS_LEN;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ tx_flags |= TX_CMD_FLG_ACK;
+ else
+ tx_flags &= ~TX_CMD_FLG_ACK;
+
+ if (ieee80211_is_probe_resp(fc))
+ tx_flags |= TX_CMD_FLG_TSF;
+ else if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+
+ /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
+ if (info->band == IEEE80211_BAND_2GHZ &&
+ (skb->protocol == cpu_to_be16(ETH_P_PAE) ||
+ is_multicast_ether_addr(hdr->addr1) ||
+ ieee80211_is_back_req(fc) ||
+ ieee80211_is_mgmt(fc)))
+ tx_flags |= TX_CMD_FLG_BT_DIS;
+
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ } else {
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ tx_flags |= TX_CMD_FLG_SEQ_CTL;
+ else
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ }
+
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+
+ /* The spec allows Action frames in A-MPDU, but we don't
+ * support it
+ */
+ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
+ } else {
+ tx_cmd->pm_frame_timeout = 0;
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
+
+ if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
+ !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+ tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = cpu_to_le32(tx_flags);
+ /* Total # bytes to be transmitted */
+ tx_cmd->len = cpu_to_le16((u16)skb->len);
+ tx_cmd->next_frame_len = 0;
+ tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ tx_cmd->sta_id = sta_id;
+}
+
+/*
+ * Sets the fields in the Tx cmd that are rate related
+ */
+static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ __le16 fc)
+{
+ u32 rate_flags;
+ int rate_idx;
+ u8 rate_plcp;
+
+ /* Set retry limit on RTS packets */
+ tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
+
+ /* Set retry limit on DATA packets and Probe Responses*/
+ if (ieee80211_is_probe_resp(fc)) {
+ tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
+ tx_cmd->rts_retry_limit =
+ min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
+ } else if (ieee80211_is_back_req(fc)) {
+ tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
+ } else {
+ tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
+ }
+
+ /*
+ * For data packets, rate info comes from the table inside the fw. This
+ * table is controlled by LINK_QUALITY commands.
+ */
+
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ return;
+ } else if (ieee80211_is_back_req(fc)) {
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ }
+
+ /* HT rate doesn't make sense for a non-data frame */
+ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
+ "Got an HT rate for a non data frame 0x%x\n",
+ info->control.rates[0].flags);
+
+ rate_idx = info->control.rates[0].idx;
+ /* if the rate isn't a well known legacy rate, take the lowest one */
+ if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY)
+ rate_idx = rate_lowest_index(
+ &mvm->nvm_data->bands[info->band], sta);
+
+ /* For 5 GHz band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
+
+ /* For 2.4 GHz band, check that there is no need to remap */
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
+
+ mvm->mgmt_last_antenna_idx =
+ iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant,
+ mvm->mgmt_last_antenna_idx);
+ rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
+}
+
+/*
+ * Sets the fields in the Tx cmd that are crypto related
+ */
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
+ ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
+ TX_CMD_SEC_WEP_KEY_IDX_MSK);
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+ break;
+ default:
+ IWL_ERR(mvm, "Unknown encode cipher %x\n", keyconf->cipher);
+ break;
+ }
+}
+
+/*
+ * Allocates the Tx cmd and sets the driver data pointers in the skb
+ */
+static struct iwl_device_cmd *
+iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta, u8 sta_id)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_device_cmd *dev_cmd;
+ struct iwl_tx_cmd *tx_cmd;
+
+ dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
+
+ if (unlikely(!dev_cmd))
+ return NULL;
+
+ memset(dev_cmd, 0, sizeof(*dev_cmd));
+ tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+
+ if (info->control.hw_key)
+ iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+
+ iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
+
+ iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ info->driver_data[0] = NULL;
+ info->driver_data[1] = dev_cmd;
+
+ return dev_cmd;
+}
+
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_device_cmd *dev_cmd;
+ struct iwl_tx_cmd *tx_cmd;
+ u8 sta_id;
+
+ if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
+ return -1;
+
+ if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
+ (!info->control.vif ||
+ info->hw_queue != info->control.vif->cab_queue)))
+ return -1;
+
+ /*
+ * If the interface on which the frame is sent is the P2P_DEVICE
+ * or an AP/GO interface, use the broadcast station associated
+ * with it; otherwise use the AUX station.
+ */
+ if (info->control.vif &&
+ (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+ info->control.vif->type == NL80211_IFTYPE_AP)) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info->control.vif);
+ sta_id = mvmvif->bcast_sta.sta_id;
+ } else {
+ sta_id = mvm->aux_sta.sta_id;
+ }
+
+ IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
+
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+ if (!dev_cmd)
+ return -1;
+
+ /* From now on, we cannot access info->control */
+ tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Prepares the Tx cmd for a frame destined to a station known to the driver
+ * and hands it to the transport layer
+ */
+int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_device_cmd *dev_cmd;
+ struct iwl_tx_cmd *tx_cmd;
+ __le16 fc;
+ u16 seq_number = 0;
+ u8 tid = IWL_MAX_TID_COUNT;
+ u8 txq_id = info->hw_queue;
+ bool is_data_qos = false, is_ampdu = false;
+
+ mvmsta = (void *)sta->drv_priv;
+ fc = hdr->frame_control;
+
+ if (WARN_ON_ONCE(!mvmsta))
+ return -1;
+
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_INVALID_STATION))
+ return -1;
+
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+ if (!dev_cmd)
+ goto drop;
+
+ tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+ /* From now on, we cannot access info->control */
+
+ spin_lock(&mvmsta->lock);
+
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ u8 *qc = NULL;
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ goto drop_unlock_sta;
+
+ seq_number = mvmsta->tid_data[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ is_data_qos = true;
+ is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+ }
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+
+ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
+
+ if (is_ampdu) {
+ if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
+ goto drop_unlock_sta;
+ txq_id = mvmsta->tid_data[tid].txq_id;
+ }
+
+ IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
+ tid, txq_id, seq_number);
+
+ /* NOTE: aggregation will need changes here (for txq id) */
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
+ goto drop_unlock_sta;
+
+ if (is_data_qos && !ieee80211_has_morefrags(fc))
+ mvmsta->tid_data[tid].seq_number = seq_number;
+
+ spin_unlock(&mvmsta->lock);
+
+ if (mvmsta->vif->type == NL80211_IFTYPE_AP &&
+ txq_id < IWL_FIRST_AMPDU_QUEUE)
+ atomic_inc(&mvmsta->pending_frames);
+
+ return 0;
+
+drop_unlock_sta:
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+drop:
+ return -1;
+}
+
+static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 tid)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ struct ieee80211_vif *vif = mvmsta->vif;
+
+ lockdep_assert_held(&mvmsta->lock);
+
+ if (tid_data->ssn != tid_data->next_reclaimed)
+ return;
+
+ switch (tid_data->state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can continue addBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
+ tid_data->state = IWL_AGG_STARTING;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can continue DELBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
+ iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+ tid_data->state = IWL_AGG_OFF;
+ /*
+ * We can't hold the mutex here - but since we are after a
+ * sequence point (the call to iwl_trans_txq_disable), we don't
+ * even need a memory barrier.
+ */
+ mvm->queue_to_mac80211[tid_data->txq_id] =
+ IWL_INVALID_MAC80211_QUEUE;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(BT_PRIO);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(SMALL_CF_POLL);
+ TX_STATUS_FAIL(FW_DROP);
+ TX_STATUS_FAIL(STA_COLOR_MISMATCH);
+ }
+
+ return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+ info->status.antenna =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
+ }
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ r->flags |= IEEE80211_TX_RC_MCS;
+ r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ ieee80211_rate_set_vht(
+ r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1);
+ r->flags |= IEEE80211_TX_RC_VHT_MCS;
+ } else {
+ r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ info->band);
+ }
+}
+
+static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct ieee80211_sta *sta;
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+ u32 status = le16_to_cpu(tx_resp->status.status);
+ u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
+ struct iwl_mvm_sta *mvmsta;
+ struct sk_buff_head skbs;
+ u8 skb_freed = 0;
+ u16 next_reclaimed, seq_ctl;
+
+ __skb_queue_head_init(&skbs);
+
+ seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+
+ /* we can free until ssn % q.n_bd not inclusive */
+ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ skb_freed++;
+
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+ /* inform mac80211 about what happened with the frame */
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ break;
+ case TX_STATUS_FAIL_DEST_PS:
+ info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ break;
+ default:
+ break;
+ }
+
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ iwl_mvm_hwrate_to_tx_control(le32_to_cpu(tx_resp->initial_rate),
+ info);
+
+ /* Single frame failure in an AMPDU queue => send BAR */
+ if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
+ !(info->flags & IEEE80211_TX_STAT_ACK)) {
+ /* there must be only one skb in the skb_list */
+ WARN_ON_ONCE(skb_freed > 1 ||
+ !skb_queue_empty(&skbs));
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ }
+
+ /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
+ if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ seq_ctl = le16_to_cpu(hdr->seq_ctrl);
+ }
+
+ ieee80211_tx_status_ni(mvm->hw, skb);
+ }
+
+ if (txq_id >= IWL_FIRST_AMPDU_QUEUE) {
+ /* If this is an aggregation queue, we use the ssn since:
+ * ssn = wifi seq_num % 256.
+ * The seq_ctl is the sequence control of the packet to which
+ * this Tx response relates. But if there is a hole in the
+ * bitmap of the BA we received, this Tx response may allow us to
+ * reclaim the hole and all the subsequent packets that were
+ * already acked. In that case, seq_ctl != ssn, and the next
+ * packet to be reclaimed will be ssn and not seq_ctl. In that
+ * case, several packets will be reclaimed even if
+ * frame_count = 1.
+ *
+ * The ssn is the index (% 256) of the latest packet that has
+ * been treated (acked / dropped) + 1.
+ */
+ next_reclaimed = ssn;
+ } else {
+ /* The next packet to be reclaimed is the one after this one */
+ next_reclaimed = SEQ_TO_SN(seq_ctl + 0x10);
+ }
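+
+ /*
+ * Worked example of the aggregation case above (illustrative only): if
+ * the BA bitmap had a hole at seq 10 while frames 11..14 were already
+ * acked, the later Tx response for frame 10 lets the transport reclaim
+ * frames 10..14 at once. seq_ctl still points at frame 10, but ssn is
+ * already 15, so using ssn as next_reclaimed avoids re-processing the
+ * frames the BA already covered.
+ */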
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TXQ %d status %s (0x%08x)\n\t\t\t\tinitial_rate 0x%x "
+ "retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
+ txq_id, iwl_mvm_get_tx_fail_reason(status),
+ status, le32_to_cpu(tx_resp->initial_rate),
+ tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
+ ssn, next_reclaimed, seq_ctl);
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ if (!IS_ERR_OR_NULL(sta)) {
+ mvmsta = (void *)sta->drv_priv;
+
+ if (tid != IWL_TID_NON_QOS) {
+ struct iwl_mvm_tid_data *tid_data =
+ &mvmsta->tid_data[tid];
+
+ spin_lock_bh(&mvmsta->lock);
+ tid_data->next_reclaimed = next_reclaimed;
+ IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
+ next_reclaimed);
+ iwl_mvm_check_ratid_empty(mvm, sta, tid);
+ spin_unlock_bh(&mvmsta->lock);
+ }
+
+#ifdef CONFIG_PM_SLEEP
+ mvmsta->last_seq_ctl = seq_ctl;
+#endif
+ } else {
+ sta = NULL;
+ mvmsta = NULL;
+ }
+
+ /*
+ * If the txq is not an AMPDU queue, there is no chance we freed
+ * several skbs. Check that out...
+ * If there are no pending frames for this STA, notify mac80211 that
+ * this station can go to sleep in its STA table.
+ */
+ if (txq_id < IWL_FIRST_AMPDU_QUEUE && mvmsta &&
+ !WARN_ON(skb_freed > 1) &&
+ mvmsta->vif->type == NL80211_IFTYPE_AP &&
+ atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) {
+ ieee80211_sta_block_awake(mvm->hw, sta, false);
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
+ }
+
+ rcu_read_unlock();
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
+static const char *iwl_get_agg_tx_status(u16 status)
+{
+ switch (status & AGG_TX_STATE_STATUS_MSK) {
+ AGG_TX_STATE_(TRANSMITTED);
+ AGG_TX_STATE_(UNDERRUN);
+ AGG_TX_STATE_(BT_PRIO);
+ AGG_TX_STATE_(FEW_BYTES);
+ AGG_TX_STATE_(ABORT);
+ AGG_TX_STATE_(LAST_SENT_TTL);
+ AGG_TX_STATE_(LAST_SENT_TRY_CNT);
+ AGG_TX_STATE_(LAST_SENT_BT_KILL);
+ AGG_TX_STATE_(SCD_QUERY);
+ AGG_TX_STATE_(TEST_BAD_CRC32);
+ AGG_TX_STATE_(RESPONSE);
+ AGG_TX_STATE_(DUMP_TX);
+ AGG_TX_STATE_(DELAY_TX);
+ }
+
+ return "UNKNOWN";
+}
+
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ struct agg_tx_status *frame_status = &tx_resp->status;
+ int i;
+
+ for (i = 0; i < tx_resp->frame_count; i++) {
+ u16 fstatus = le16_to_cpu(frame_status[i].status);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
+ iwl_get_agg_tx_status(fstatus),
+ fstatus & AGG_TX_STATE_STATUS_MSK,
+ (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
+ AGG_TX_STATE_TRY_CNT_POS,
+ le16_to_cpu(frame_status[i].sequence));
+ }
+}
+#else
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ struct ieee80211_sta *sta;
+
+ if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_FIRST_AMPDU_QUEUE))
+ return;
+
+ if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
+ return;
+
+ iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ mvmsta->tid_data[tid].rate_n_flags =
+ le32_to_cpu(tx_resp->initial_rate);
+ }
+
+ rcu_read_unlock();
+}
+
+int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+
+ if (tx_resp->frame_count == 1)
+ iwl_mvm_rx_tx_cmd_single(mvm, pkt);
+ else
+ iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
+
+ return 0;
+}
+
+int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
+ struct sk_buff_head reclaimed_skbs;
+ struct iwl_mvm_tid_data *tid_data;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ int sta_id, tid, freed;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
+
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
+
+ sta_id = ba_notif->sta_id;
+ tid = ba_notif->tid;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /* Reclaiming frames for a station that has been deleted? */
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ mvmsta = (void *)sta->drv_priv;
+ tid_data = &mvmsta->tid_data[tid];
+
+ if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
+ tid_data->txq_id, tid, scd_flow)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ spin_lock_bh(&mvmsta->lock);
+
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ /*
+ * Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway).
+ */
+ iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
+ &reclaimed_skbs);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+ (u8 *)&ba_notif->sta_addr_lo32,
+ ba_notif->sta_id);
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+ ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+ (unsigned long long)le64_to_cpu(ba_notif->bitmap),
+ scd_flow, ba_resp_scd_ssn, ba_notif->txed,
+ ba_notif->txed_2_done);
+
+ tid_data->next_reclaimed = ba_resp_scd_ssn;
+
+ iwl_mvm_check_ratid_empty(mvm, sta, tid);
+
+ freed = 0;
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(1);
+
+ info = IEEE80211_SKB_CB(skb);
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+ if (freed == 1) {
+ /* this is the first skb we deliver in this batch */
+ /* put the rate scaling data there */
+ info = IEEE80211_SKB_CB(skb);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = ba_notif->txed_2_done;
+ info->status.ampdu_len = ba_notif->txed;
+ iwl_mvm_hwrate_to_tx_control(tid_data->rate_n_flags,
+ info);
+ }
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+
+ rcu_read_unlock();
+
+ while (!skb_queue_empty(&reclaimed_skbs)) {
+ skb = __skb_dequeue(&reclaimed_skbs);
+ ieee80211_tx_status_ni(mvm->hw, skb);
+ }
+
+ return 0;
+}
+
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
+{
+ int ret;
+ struct iwl_tx_path_flush_cmd flush_cmd = {
+ .queues_ctl = cpu_to_le32(tfd_msk),
+ .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
+ };
+
+ u32 flags = sync ? CMD_SYNC : CMD_ASYNC;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+ sizeof(flush_cmd), &flush_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
new file mode 100644
index 00000000000..000e842c2ed
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -0,0 +1,472 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "iwl-io.h"
+
+#include "mvm.h"
+#include "fw-api-rs.h"
+
+/*
+ * Will return 0 even if the cmd failed when RFKILL is asserted unless
+ * CMD_WANT_SKB is set in cmd->flags.
+ */
+int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
+{
+ int ret;
+
+ /*
+ * Synchronous commands from this op-mode must hold
+ * the mutex, this ensures we don't try to send two
+ * (or more) synchronous commands at a time.
+ */
+ if (!(cmd->flags & CMD_ASYNC))
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_send_cmd(mvm->trans, cmd);
+
+ /*
+ * If the caller wants the SKB, then don't hide any problems; the
+ * caller might access the response buffer which will be NULL if
+ * the command failed.
+ */
+ if (cmd->flags & CMD_WANT_SKB)
+ return ret;
+
+ /* Silently ignore failures if RFKILL is asserted */
+ if (!ret || ret == -ERFKILL)
+ return 0;
+ return ret;
+}
+
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+ u32 flags, u16 len, const void *data)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
+ u32 *status)
+{
+ struct iwl_rx_packet *pkt;
+ struct iwl_cmd_response *resp;
+ int ret, resp_len;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /*
+ * Only synchronous commands can wait for status;
+ * we use WANT_SKB internally so the caller can't.
+ */
+ if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
+ "cmd flags %x", cmd->flags))
+ return -EINVAL;
+
+ cmd->flags |= CMD_SYNC | CMD_WANT_SKB;
+
+ ret = iwl_trans_send_cmd(mvm->trans, cmd);
+ if (ret == -ERFKILL) {
+ /*
+ * The command failed because of RFKILL; don't update
+ * the status, leave it as success and return 0.
+ */
+ return 0;
+ } else if (ret) {
+ return ret;
+ }
+
+ pkt = cmd->resp_pkt;
+ /* Can happen if RFKILL is asserted */
+ if (!pkt) {
+ ret = 0;
+ goto out_free_resp;
+ }
+
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ resp = (void *)pkt->data;
+ *status = le32_to_cpu(resp->status);
+ out_free_resp:
+ iwl_free_resp(cmd);
+ return ret;
+}
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
+ const void *data, u32 *status)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ };
+
+ return iwl_mvm_send_cmd_status(mvm, &cmd, status);
+}
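+
+/*
+ * Illustrative sketch of the pattern described above (not a verbatim call
+ * site): the caller seeds *status with the command's success value before
+ * sending, so that a response suppressed by RFKILL still reads as success.
+ * Using the ADD_STA command as an example:
+ *
+ *	struct iwl_mvm_add_sta_cmd cmd = { ... };
+ *	u32 status = ADD_STA_SUCCESS;
+ *	int ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ *					      &cmd, &status);
+ *	if (!ret && status != ADD_STA_SUCCESS)
+ *		IWL_ERR(mvm, "ADD_STA failed, status 0x%x\n", status);
+ */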
+
+#define IWL_DECLARE_RATE_INFO(r) \
+ [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
+
+/*
+ * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
+ */
+static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1),
+ IWL_DECLARE_RATE_INFO(2),
+ IWL_DECLARE_RATE_INFO(5),
+ IWL_DECLARE_RATE_INFO(11),
+ IWL_DECLARE_RATE_INFO(6),
+ IWL_DECLARE_RATE_INFO(9),
+ IWL_DECLARE_RATE_INFO(12),
+ IWL_DECLARE_RATE_INFO(18),
+ IWL_DECLARE_RATE_INFO(24),
+ IWL_DECLARE_RATE_INFO(36),
+ IWL_DECLARE_RATE_INFO(48),
+ IWL_DECLARE_RATE_INFO(54),
+};
+
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+ enum ieee80211_band band)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+ int idx;
+ int band_offset = 0;
+
+ /* Legacy rate format, search for match in table */
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IWL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+ if (fw_rate_idx_to_plcp[idx] == rate)
+ return idx - band_offset;
+
+ return -1;
+}
+
+u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
+{
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ return fw_rate_idx_to_plcp[rate_idx];
+}
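+
+/*
+ * Worked example (illustrative only): the table above is ordered CCK first
+ * (1, 2, 5.5, 11 Mbps) and then OFDM (6 Mbps onwards). On 5 GHz, where CCK
+ * doesn't exist, the 6 Mbps PLCP therefore maps back to mac80211 index 0
+ * (idx - IWL_FIRST_OFDM_RATE), while on 2.4 GHz the same PLCP maps to
+ * index IWL_FIRST_OFDM_RATE since the band offset is 0 there.
+ */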
+
+int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_error_resp *err_resp = (void *)pkt->data;
+
+ IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
+ le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
+ IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
+ le16_to_cpu(err_resp->bad_cmd_seq_num),
+ le32_to_cpu(err_resp->error_service));
+ IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
+ le64_to_cpu(err_resp->timestamp));
+ return 0;
+}
+
+/*
+ * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
+ * The parameter should also be a combination of ANT_[ABC].
+ */
+u8 first_antenna(u8 mask)
+{
+ BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
+ WARN_ON_ONCE(!mask); /* ffs will return 0 if mask is zeroed */
+ return (u8)(BIT(ffs(mask) - 1));
+}
+
+/*
+ * Toggles between TX antennas to send the probe request on.
+ * Receives the bitmask of valid TX antennas and the *index* used
+ * for the last TX, and returns the next valid *index* to use.
+ * In order to set it in the tx_cmd, must do BIT(idx).
+ */
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
+{
+ u8 ind = last_idx;
+ int i;
+
+ for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
+ ind = (ind + 1) % RATE_MCS_ANT_NUM;
+ if (valid & BIT(ind))
+ return ind;
+ }
+
+ WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
+ return last_idx;
+}
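+
+/*
+ * Worked example (illustrative only, with ANT_A/B/C being BIT(0)/BIT(1)/BIT(2)
+ * as defined in iwl-config.h): with valid = ANT_A | ANT_C and last_idx = 0
+ * (antenna A), the loop skips index 1 (antenna B, not in the valid mask) and
+ * returns 2, so the caller programs BIT(2) == ANT_C into the antenna bits of
+ * rate_n_flags for the next probe request.
+ */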
+
+static struct {
+ char *name;
+ u8 num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *desc_lookup(u32 num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
+ if (advanced_lookup[i].num == num)
+ return advanced_lookup[i].name;
+
+ /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+ return advanced_lookup[i].name;
+}
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 pc; /* program counter */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 bcon_time; /* beacon timer */
+ u32 tsf_low; /* network timestamp function timer */
+ u32 tsf_hi; /* network timestamp function timer */
+ u32 gp1; /* GP1 timer register */
+ u32 gp2; /* GP2 timer register */
+ u32 gp3; /* GP3 timer register */
+ u32 ucode_ver; /* uCode version */
+ u32 hw_ver; /* HW Silicon version */
+ u32 brd_ver; /* HW board version */
+ u32 log_pc; /* log program counter */
+ u32 frame_ptr; /* frame pointer */
+ u32 stack_ptr; /* stack pointer */
+ u32 hcmd; /* last host command header */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
+ u32 wait_event; /* wait event() caller address */
+ u32 l2p_control; /* L2pControlField */
+ u32 l2p_duration; /* L2pDurationField */
+ u32 l2p_mhvalid; /* L2pMhValidBits */
+ u32 l2p_addr_match; /* L2pAddrMatchStat */
+ u32 lmpm_pmg_sel; /* indicates which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ u32 u_timestamp; /* indicates the date and time of the
+ * compilation */
+ u32 flow_handler; /* FH read/write pointers, RX credit */
+} __packed;
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ struct iwl_error_event_table table;
+ u32 base;
+
+ base = mvm->error_event_table;
+ if (mvm->cur_ucode == IWL_UCODE_INIT) {
+ if (!base)
+ base = mvm->fw->init_errlog_ptr;
+ } else {
+ if (!base)
+ base = mvm->fw->inst_errlog_ptr;
+ }
+
+ if (base < 0x800000 || base >= 0x80C000) {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (mvm->cur_ucode == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ mvm->status, table.valid);
+ }
+
+ trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
+ table.data1, table.data2, table.data3,
+ table.blink1, table.blink2, table.ilink1,
+ table.ilink2, table.bcon_time, table.gp1,
+ table.gp2, table.gp3, table.ucode_ver,
+ table.hw_ver, table.brd_ver);
+ IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
+ IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
+ IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
+ IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
+ IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
+ IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
+ IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
+ IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
+ IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
+ IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
+ IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
+ IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
+ IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
+ IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
+ IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
+ IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
+ IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
+ IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
+ IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
+ IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
+ IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
+ IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
+ IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
+ IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
+ IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
+ IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
+ IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
+ IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+ IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+ IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+ IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
+ IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+/**
+ * iwl_mvm_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * When @init is set, a callback is invoked to clear the state that
+ * indicates station creation is in progress.
+ */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
+ u8 flags, bool init)
+{
+ struct iwl_host_cmd cmd = {
+ .id = LQ_CMD,
+ .len = { sizeof(struct iwl_lq_cmd), },
+ .flags = flags,
+ .data = { lq, },
+ };
+
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
+ return -EINVAL;
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
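
A hedged usage sketch (the wrapper name and call site are assumptions, not part of this patch): when init is true the command must be synchronous, since the WARN_ON above rejects init together with CMD_ASYNC, so a caller passes 0 for the flags:

        /* Hypothetical caller: send the LQ command synchronously as the
         * last step of adding a station (flags = 0 means no CMD_ASYNC). */
        static int example_finalize_sta(struct iwl_mvm *mvm,
                                        struct iwl_lq_cmd *lq)
        {
                return iwl_mvm_send_lq_cmd(mvm, lq, 0, true);
        }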
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
index f8620ecae6b..ff338975728 100644
--- a/drivers/net/wireless/iwlwifi/pcie/1000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
index 244019cec3e..e7de33128b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/2000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
index 83ca40321ff..5096f7c96ab 100644
--- a/drivers/net/wireless/iwlwifi/pcie/5000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
index d4df976d470..801ff49796d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/pcie/7000.c b/drivers/net/wireless/iwlwifi/pcie/7000.c
new file mode 100644
index 00000000000..6e35b2b7233
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/7000.c
@@ -0,0 +1,111 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+#include "cfg.h"
+
+/* Highest firmware API version supported */
+#define IWL7260_UCODE_API_MAX 6
+#define IWL3160_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL7260_UCODE_API_OK 6
+#define IWL3160_UCODE_API_OK 6
+
+/* Lowest firmware API version supported */
+#define IWL7260_UCODE_API_MIN 6
+#define IWL3160_UCODE_API_MIN 6
+
+/* NVM versions */
+#define IWL7260_NVM_VERSION 0x0a1d
+#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */
+#define IWL3160_NVM_VERSION 0x709
+#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
+
+#define IWL7260_FW_PRE "iwlwifi-7260-"
+#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
+
+#define IWL3160_FW_PRE "iwlwifi-3160-"
+#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl7000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .pll_cfg_val = 0,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+};
+
+static const struct iwl_ht_params iwl7000_ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_7000 \
+ .ucode_api_max = IWL7260_UCODE_API_MAX, \
+ .ucode_api_ok = IWL7260_UCODE_API_OK, \
+ .ucode_api_min = IWL7260_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_7000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl7000_base_params, \
+ /* TODO: .bt_params? */ \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true \
+
+
+const struct iwl_cfg iwl7260_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC7260",
+ .fw_name_pre = IWL7260_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7260_NVM_VERSION,
+ .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC3160",
+ .fw_name_pre = IWL3160_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL3160_NVM_VERSION,
+ .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+};
+
+MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
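
The IWL_DEVICE_7000 macro keeps each SKU definition down to its name and NVM fields; a hypothetical further entry (iwl7260_2n_cfg is an illustration only and is not added by this patch) would follow the same shape:

        const struct iwl_cfg iwl7260_2n_cfg = {
                .name = "Intel(R) Dual Band Wireless N7260",  /* hypothetical */
                .fw_name_pre = IWL7260_FW_PRE,
                IWL_DEVICE_7000,
                .ht_params = &iwl7000_ht_params,
                .nvm_ver = IWL7260_NVM_VERSION,
                .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
        };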
diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
index 82152311d73..c6f8e83c355 100644
--- a/drivers/net/wireless/iwlwifi/pcie/cfg.h
+++ b/drivers/net/wireless/iwlwifi/pcie/cfg.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,5 +109,7 @@ extern const struct iwl_cfg iwl6035_2agn_cfg;
extern const struct iwl_cfg iwl105_bgn_cfg;
extern const struct iwl_cfg iwl105_bgn_d_cfg;
extern const struct iwl_cfg iwl135_bgn_cfg;
+extern const struct iwl_cfg iwl7260_2ac_cfg;
+extern const struct iwl_cfg iwl3160_ac_cfg;
#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index c2e141af353..7bc0fb9128d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -255,6 +255,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+/* 7000 Series */
+ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)},
+
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 20735a008ca..aa2a39a637d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -235,6 +235,7 @@ struct iwl_txq {
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @rx_page_order: page order for receive buffer size
* @wd_timeout: queue watchdog timeout (jiffies)
+ * @reg_lock: protect hw register access
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
@@ -248,7 +249,6 @@ struct iwl_trans_pcie {
int ict_index;
u32 inta;
bool use_ict;
- struct tasklet_struct irq_tasklet;
struct isr_statistics isr_stats;
spinlock_t irq_lock;
@@ -283,6 +283,9 @@ struct iwl_trans_pcie {
/* queue watchdog */
unsigned long wd_timeout;
+
+ /* protect hw register access */
+ spinlock_t reg_lock;
};
/**
@@ -326,7 +329,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
-void iwl_pcie_tasklet(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 4e6591d24e6..b0ae06d2456 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -81,10 +81,10 @@
* 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+ * If there were enough free buffers and RX_STALLED is set it is cleared.
*
*
* Driver sequence:
@@ -214,9 +214,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
/*
 	 * If the device isn't enabled - no need to try to add buffers...
* This can happen when we stop the device and still have an interrupt
- * pending. We stop the APM before we sync the interrupts / tasklets
- * because we have to (see comment there). On the other hand, since
- * the APM is stopped, we cannot access the HW (in particular not prph).
+ * pending. We stop the APM before we sync the interrupts because we
+ * have to (see comment there). On the other hand, since the APM is
+ * stopped, we cannot access the HW (in particular not prph).
* So don't try to restock if the APM has been already stopped.
*/
if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
@@ -594,6 +594,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int index, cmd_index, err, len;
struct iwl_rx_cmd_buffer rxcb = {
._offset = offset,
+ ._rx_page_order = trans_pcie->rx_page_order,
._page = rxb->page,
._page_stolen = false,
.truesize = max_len,
@@ -795,11 +796,14 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
wake_up(&trans_pcie->wait_command_queue);
+ local_bh_disable();
iwl_op_mode_nic_error(trans->op_mode);
+ local_bh_enable();
}
-void iwl_pcie_tasklet(struct iwl_trans *trans)
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
+ struct iwl_trans *trans = dev_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
@@ -810,6 +814,8 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
u32 inta_mask;
#endif
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Ack/clear/reset pending uCode interrupts.
@@ -854,7 +860,7 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
handled |= CSR_INT_BIT_HW_ERR;
- return;
+ goto out;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1004,6 +1010,10 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
iwl_enable_rfkill_int(trans);
+
+out:
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_HANDLED;
}
/******************************************************************************
@@ -1126,7 +1136,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
+ * If we have something to service, the irq thread will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here. */
inta_mask = iwl_read32(trans, CSR_INT_MASK);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@@ -1166,9 +1176,9 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
#endif
trans_pcie->inta |= inta;
- /* iwl_pcie_tasklet() will service interrupts and re-enable them */
+ /* the thread will service interrupts and re-enable them */
if (likely(inta))
- tasklet_schedule(&trans_pcie->irq_tasklet);
+ return IRQ_WAKE_THREAD;
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
@@ -1276,9 +1286,10 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
trans_pcie->inta |= inta;
/* iwl_pcie_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&trans_pcie->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
+ if (likely(inta)) {
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ return IRQ_WAKE_THREAD;
+ } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta) {
 		/* Allow interrupt if it was disabled by this handler and
 		 * no tasklet was scheduled. We should not enable interrupt,
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index c57641eb83d..17bedc50e75 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,33 @@
#include "iwl-agn-hw.h"
#include "internal.h"
+static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+ u32 reg, u32 mask, u32 value)
+{
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+}
+
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+}
+
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -733,7 +760,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
synchronize_irq(trans_pcie->pci_dev->irq);
- tasklet_kill(&trans_pcie->irq_tasklet);
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
@@ -779,15 +805,16 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
}
#endif /* CONFIG_PM_SLEEP */
-static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
+ unsigned long *flags)
{
int ret;
-
- lockdep_assert_held(&trans->reg_lock);
+ struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
+ spin_lock_irqsave(&pcie_trans->reg_lock, *flags);
/* this bit wakes up the NIC */
- __iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* These bits say the device is running, and should keep running for
@@ -819,18 +846,34 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
WARN_ONCE(1,
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
val);
+ spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
return false;
}
}
+ /*
+ * Fool sparse by faking we release the lock - sparse will
+ * track nic_access anyway.
+ */
+ __release(&pcie_trans->reg_lock);
return true;
}
-static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
+ unsigned long *flags)
{
- lockdep_assert_held(&trans->reg_lock);
- __iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&pcie_trans->reg_lock);
+
+ /*
+ * Fool sparse by faking we acquire the lock - sparse will
+ * track nic_access anyway.
+ */
+ __acquire(&pcie_trans->reg_lock);
+
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the
@@ -838,6 +881,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
* scheduled on different CPUs (after we drop reg_lock).
*/
mmiowb();
+ spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
}
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
@@ -847,16 +891,14 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
int offs, ret = 0;
u32 *vals = buf;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(trans, false)) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
for (offs = 0; offs < dwords; offs++)
vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
- iwl_trans_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
} else {
ret = -EBUSY;
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
return ret;
}
@@ -867,17 +909,15 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
int offs, ret = 0;
u32 *vals = buf;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (iwl_trans_grab_nic_access(trans, false)) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
for (offs = 0; offs < dwords; offs++)
iwl_write32(trans, HBUS_TARG_MEM_WDAT,
vals ? vals[offs] : 0);
- iwl_trans_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
} else {
ret = -EBUSY;
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
return ret;
}
@@ -952,6 +992,17 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
return ret;
}
+static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+}
+
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
@@ -1405,7 +1456,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.configure = iwl_trans_pcie_configure,
.set_pmi = iwl_trans_pcie_set_pmi,
.grab_nic_access = iwl_trans_pcie_grab_nic_access,
- .release_nic_access = iwl_trans_pcie_release_nic_access
+ .release_nic_access = iwl_trans_pcie_release_nic_access,
+ .set_bits_mask = iwl_trans_pcie_set_bits_mask,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -1427,8 +1479,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans->ops = &trans_ops_pcie;
trans->cfg = cfg;
+ trans_lockdep_init(trans);
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
+ spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
/* W/A - seems to solve weird behavior. We need to remove this if we
@@ -1495,7 +1549,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
- spin_lock_init(&trans->reg_lock);
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
@@ -1514,15 +1567,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->inta_mask = CSR_INI_SET_MASK;
- tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
- iwl_pcie_tasklet, (unsigned long)trans);
-
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
- err = request_irq(pdev->irq, iwl_pcie_isr_ict,
- IRQF_SHARED, DRV_NAME, trans);
- if (err) {
+ if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans)) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index a93f06762b9..8e9e3212fe7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -926,7 +926,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (WARN_ON(txq_id == trans_pcie->cmd_queue))
return;
- spin_lock(&txq->lock);
+ spin_lock_bh(&txq->lock);
if (txq->q.read_ptr == tfd_num)
goto out;
@@ -970,7 +970,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (iwl_queue_space(&txq->q) > txq->q.low_mark)
iwl_wake_queue(trans, txq);
out:
- spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->lock);
}
/*
@@ -1371,7 +1371,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
return;
}
- spin_lock(&txq->lock);
+ spin_lock_bh(&txq->lock);
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->entries[cmd_index].cmd;
@@ -1405,7 +1405,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
meta->flags = 0;
- spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->lock);
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index a7dcb2e5ecc..116f4aba08d 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -657,7 +657,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
capa, intvl, ie, ielen,
LBS_SCAN_RSSI_TO_MBM(rssi),
GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
}
} else
lbs_deb_scan("scan response: missing BSS channel IE\n");
@@ -1444,7 +1444,7 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
done:
if (bss)
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -1766,7 +1766,7 @@ static void lbs_join_post(struct lbs_private *priv,
params->beacon_interval,
fake_ie, fake - fake_ie,
0, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->wdev->wiphy, bss);
memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
priv->wdev->ssid_len = params->ssid_len;
@@ -2011,7 +2011,7 @@ static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
if (bss) {
ret = lbs_ibss_join_existing(priv, params, bss);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
} else
ret = lbs_ibss_start_new(priv, params);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index b73e497fe77..cffdf4fbf16 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2247,6 +2247,7 @@ static int __init init_mac80211_hwsim(void)
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
hw->sta_data_size = sizeof(struct hwsim_sta_priv);
+ hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv);
memcpy(data->channels_2ghz, hwsim_channels_2ghz,
sizeof(hwsim_channels_2ghz));
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 7b0ae240708..25596ab0c57 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -400,45 +400,6 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
}
/*
- * This function reconfigures the Tx buffer size in firmware.
- *
- * This function prepares a firmware command and issues it, if
- * the current Tx buffer size is different from the one requested.
- * Maximum configurable Tx buffer size is limited by the HT capability
- * field value.
- */
-void
-mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc)
-{
- u16 max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_2K;
- u16 tx_buf, curr_tx_buf_size = 0;
-
- if (bss_desc->bcn_ht_cap) {
- if (le16_to_cpu(bss_desc->bcn_ht_cap->cap_info) &
- IEEE80211_HT_CAP_MAX_AMSDU)
- max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_8K;
- else
- max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_4K;
- }
-
- tx_buf = min(priv->adapter->max_tx_buf_size, max_amsdu);
-
- dev_dbg(priv->adapter->dev, "info: max_amsdu=%d, max_tx_buf=%d\n",
- max_amsdu, priv->adapter->max_tx_buf_size);
-
- if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_2K)
- curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
- else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_4K)
- curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
- else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_8K)
- curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_8K;
- if (curr_tx_buf_size != tx_buf)
- mwifiex_send_cmd_async(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
- HostCmd_ACT_GEN_SET, 0, &tx_buf);
-}
-
-/*
* This function checks if the given pointer is valid entry of
* Tx BA Stream table.
*/
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 46006a54a65..29a4c02479d 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -34,8 +34,6 @@ int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc,
u8 **buffer);
-void mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc);
void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
struct mwifiex_ie_types_htcap *);
int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index b2e27723f80..4f614aad9de 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -20,12 +20,12 @@ config MWIFIEX_SDIO
mwifiex_sdio.
config MWIFIEX_PCIE
- tristate "Marvell WiFi-Ex Driver for PCIE 8766"
+ tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
depends on MWIFIEX && PCI
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
- 8766 chipset with PCIe interface.
+ 8766/8897 chipsets with PCIe interface.
If you choose to build it as a module, it will be called
mwifiex_pcie.
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index b55badef466..3d64613ebb2 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -121,7 +121,6 @@ info
wmm_ac_vi = <number of packets sent to device from WMM AcVi queue>
wmm_ac_be = <number of packets sent to device from WMM AcBE queue>
wmm_ac_bk = <number of packets sent to device from WMM AcBK queue>
- max_tx_buf_size = <maximum Tx buffer size>
tx_buf_size = <current Tx buffer size>
curr_tx_buf_size = <current Tx buffer size>
ps_mode = <0/1, CAM mode/PS mode>
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8ba48192cd2..dc5357c0098 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1430,7 +1430,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
0, ie_buf, ie_len, 0, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->wdev->wiphy, bss);
memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
return 0;
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 46e34aa65d1..753b5682d53 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -58,8 +58,6 @@ static struct mwifiex_debug_data items[] = {
item_addr(packets_out[WMM_AC_BE]), 1},
{"wmm_ac_bk", item_size(packets_out[WMM_AC_BK]),
item_addr(packets_out[WMM_AC_BK]), 1},
- {"max_tx_buf_size", item_size(max_tx_buf_size),
- item_addr(max_tx_buf_size), 1},
{"tx_buf_size", item_size(tx_buf_size),
item_addr(tx_buf_size), 1},
{"curr_tx_buf_size", item_size(curr_tx_buf_size),
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 84848c33b7f..e38aa9b3663 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -314,7 +314,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->pm_wakeup_fw_try = false;
- adapter->max_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 6095b3e53f4..f3d9d044552 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -178,7 +178,6 @@ struct mwifiex_ds_tx_ba_stream_tbl {
struct mwifiex_debug_info {
u32 int_counter;
u32 packets_out[MAX_NUM_TID];
- u32 max_tx_buf_size;
u32 tx_buf_size;
u32 curr_tx_buf_size;
u32 tx_tbl_num;
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 893d809ba83..a537297866c 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -157,8 +157,8 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
memset(rate1, 0, rate1_size);
- for (i = 0; rate2[i] && i < rate2_size; i++) {
- for (j = 0; tmp[j] && j < rate1_size; j++) {
+ for (i = 0; i < rate2_size && rate2[i]; i++) {
+ for (j = 0; j < rate1_size && tmp[j]; j++) {
/* Check common rate, excluding the bit for
basic rate */
if ((rate2[i] & 0x7F) == (tmp[j] & 0x7F)) {
@@ -398,8 +398,6 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
pos = (u8 *) assoc;
- mwifiex_cfg_tx_buf(priv, bss_desc);
-
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_ASSOCIATE);
/* Save so we know which BSS Desc to use in the response handler */
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 51044e3ea89..ac799a046eb 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -631,7 +631,6 @@ struct mwifiex_adapter {
/* spin lock for main process */
spinlock_t main_proc_lock;
u32 mwifiex_processing;
- u16 max_tx_buf_size;
u16 tx_buf_size;
u16 curr_tx_buf_size;
u32 ioport;
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 237949c070c..492655c048d 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -62,6 +62,10 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
{
u32 *cookie_addr;
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+ if (!reg->sleep_cookie)
+ return true;
if (card->sleep_cookie_vbase) {
cookie_addr = (u32 *)card->sleep_cookie_vbase;
@@ -94,6 +98,13 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
card->dev = pdev;
+ if (ent->driver_data) {
+ struct mwifiex_pcie_device *data = (void *)ent->driver_data;
+ card->pcie.firmware = data->firmware;
+ card->pcie.reg = data->reg;
+ card->pcie.blksz_fw_dl = data->blksz_fw_dl;
+ }
+
if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
MWIFIEX_PCIE)) {
pr_err("%s failed\n", __func__);
@@ -230,13 +241,16 @@ static int mwifiex_pcie_resume(struct pci_dev *pdev)
return 0;
}
-#define PCIE_VENDOR_ID_MARVELL (0x11ab)
-#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30)
-
static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
{
PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ .driver_data = (unsigned long) &mwifiex_pcie8766,
+ },
+ {
+ PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ .driver_data = (unsigned long) &mwifiex_pcie8897,
},
{},
};
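
The probe hunk above relies on the usual pattern of hanging per-chip parameters off pci_device_id.driver_data and recovering them in probe(); a minimal sketch with hypothetical names and values:

        struct example_chip_info {
                const char *firmware;
                unsigned int blksz_fw_dl;
        };

        static const struct example_chip_info example_chip = {
                .firmware = "example/fw_image.bin",     /* hypothetical path */
                .blksz_fw_dl = 256,
        };

        static const struct pci_device_id example_ids[] = {
                { PCI_DEVICE(0x11ab, 0x2b30),
                  .driver_data = (kernel_ulong_t)&example_chip },
                { }
        };

        static int example_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
        {
                const struct example_chip_info *info =
                        (const struct example_chip_info *)ent->driver_data;

                dev_info(&pdev->dev, "firmware %s, fw download block %u\n",
                         info->firmware, info->blksz_fw_dl);
                return 0;
        }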
@@ -289,8 +303,10 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
int i = 0;
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- while (mwifiex_pcie_ok_to_access_hw(adapter)) {
+ while (reg->sleep_cookie && mwifiex_pcie_ok_to_access_hw(adapter)) {
i++;
usleep_range(10, 20);
/* 50ms max wait */
@@ -364,25 +380,268 @@ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter)
}
/*
- * This function creates buffer descriptor ring for TX
+ * This function initializes TX buffer ring descriptors
*/
-static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
+static int mwifiex_init_txq_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ card->tx_buf_list[i] = NULL;
+ if (reg->pfu_enabled) {
+ card->txbd_ring[i] = (void *)card->txbd_ring_vbase +
+ (sizeof(*desc2) * i);
+ desc2 = card->txbd_ring[i];
+ memset(desc2, 0, sizeof(*desc2));
+ } else {
+ card->txbd_ring[i] = (void *)card->txbd_ring_vbase +
+ (sizeof(*desc) * i);
+ desc = card->txbd_ring[i];
+ memset(desc, 0, sizeof(*desc));
+ }
+ }
+
+ return 0;
+}
+
+/* This function initializes RX buffer ring descriptors. Each SKB is allocated
+ * here and, after PCI memory mapping, its physical address is assigned to the
+ * PCIe Rx buffer descriptor's physical address.
+ */
+static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ struct sk_buff *skb;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
+ dma_addr_t buf_pa;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ /* Allocate skb here so that firmware can DMA data from it */
+ skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+ if (!skb) {
+ dev_err(adapter->dev,
+ "Unable to allocate skb for RX ring.\n");
+ kfree(card->rxbd_ring_vbase);
+ return -ENOMEM;
+ }
+
+ if (mwifiex_map_pci_memory(adapter, skb,
+ MWIFIEX_RX_DATA_BUF_SIZE,
+ PCI_DMA_FROMDEVICE))
+ return -1;
+
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+
+ dev_dbg(adapter->dev,
+ "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+ skb, skb->len, skb->data, (u32)buf_pa,
+ (u32)((u64)buf_pa >> 32));
+
+ card->rx_buf_list[i] = skb;
+ if (reg->pfu_enabled) {
+ card->rxbd_ring[i] = (void *)card->rxbd_ring_vbase +
+ (sizeof(*desc2) * i);
+ desc2 = card->rxbd_ring[i];
+ desc2->paddr = buf_pa;
+ desc2->len = (u16)skb->len;
+ desc2->frag_len = (u16)skb->len;
+ desc2->flags = reg->ring_flag_eop | reg->ring_flag_sop;
+ desc2->offset = 0;
+ } else {
+ card->rxbd_ring[i] = (void *)(card->rxbd_ring_vbase +
+ (sizeof(*desc) * i));
+ desc = card->rxbd_ring[i];
+ desc->paddr = buf_pa;
+ desc->len = (u16)skb->len;
+ desc->flags = 0;
+ }
+ }
+
+ return 0;
+}
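
mwifiex_map_pci_memory() used above is the driver's wrapper around the legacy PCI DMA API of this era; a minimal sketch of the underlying allocate-and-map step (the helper name is hypothetical, <linux/pci.h> and <linux/skbuff.h> assumed):

        static int example_alloc_map_rx_buf(struct pci_dev *pdev, size_t len,
                                            struct sk_buff **skb_out,
                                            dma_addr_t *pa_out)
        {
                struct sk_buff *skb = dev_alloc_skb(len);
                dma_addr_t pa;

                if (!skb)
                        return -ENOMEM;

                /* Map the buffer so the device can DMA received data into it */
                pa = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(pdev, pa)) {
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }

                *skb_out = skb;
                *pa_out = pa;   /* stored in the ring descriptor's paddr field */
                return 0;
        }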
+
+/* This function initializes event buffer ring descriptors. Each SKB is
+ * allocated here and, after PCI memory mapping, its physical address is
+ * assigned to the PCIe event buffer descriptor's physical address.
+ */
+static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ struct mwifiex_evt_buf_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t buf_pa;
int i;
+ for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
+ /* Allocate skb here so that firmware can DMA data from it */
+ skb = dev_alloc_skb(MAX_EVENT_SIZE);
+ if (!skb) {
+ dev_err(adapter->dev,
+ "Unable to allocate skb for EVENT buf.\n");
+ kfree(card->evtbd_ring_vbase);
+ return -ENOMEM;
+ }
+ skb_put(skb, MAX_EVENT_SIZE);
+
+ if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
+ PCI_DMA_FROMDEVICE))
+ return -1;
+
+ MWIFIEX_SKB_PACB(skb, &buf_pa);
+
+ dev_dbg(adapter->dev,
+ "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+ skb, skb->len, skb->data, (u32)buf_pa,
+ (u32)((u64)buf_pa >> 32));
+
+ card->evt_buf_list[i] = skb;
+ card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase +
+ (sizeof(*desc) * i));
+ desc = card->evtbd_ring[i];
+ desc->paddr = buf_pa;
+ desc->len = (u16)skb->len;
+ desc->flags = 0;
+ }
+
+ return 0;
+}
+
+/* This function cleans up the TX buffer ring. If any entry in the buffer list
+ * holds a valid SKB, the associated SKB is freed.
+ */
+static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ struct sk_buff *skb;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ if (reg->pfu_enabled) {
+ desc2 = card->txbd_ring[i];
+ if (card->tx_buf_list[i]) {
+ skb = card->tx_buf_list[i];
+ pci_unmap_single(card->dev, desc2->paddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ memset(desc2, 0, sizeof(*desc2));
+ } else {
+ desc = card->txbd_ring[i];
+ if (card->tx_buf_list[i]) {
+ skb = card->tx_buf_list[i];
+ pci_unmap_single(card->dev, desc->paddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ memset(desc, 0, sizeof(*desc));
+ }
+ card->tx_buf_list[i] = NULL;
+ }
+
+ return;
+}
+
+/* This function cleans up the RX buffer ring. If any entry in the buffer list
+ * holds a valid SKB, the associated SKB is freed.
+ */
+static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
+ if (reg->pfu_enabled) {
+ desc2 = card->rxbd_ring[i];
+ if (card->rx_buf_list[i]) {
+ skb = card->rx_buf_list[i];
+ pci_unmap_single(card->dev, desc2->paddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ memset(desc2, 0, sizeof(*desc2));
+ } else {
+ desc = card->rxbd_ring[i];
+ if (card->rx_buf_list[i]) {
+ skb = card->rx_buf_list[i];
+ pci_unmap_single(card->dev, desc->paddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ memset(desc, 0, sizeof(*desc));
+ }
+ card->rx_buf_list[i] = NULL;
+ }
+
+ return;
+}
+
+/* This function cleans up the event buffer ring. If any entry in the buffer
+ * list holds a valid SKB, the associated SKB is freed.
+ */
+static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct mwifiex_evt_buf_desc *desc;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
+ desc = card->evtbd_ring[i];
+ if (card->evt_buf_list[i]) {
+ skb = card->evt_buf_list[i];
+ pci_unmap_single(card->dev, desc->paddr, MAX_EVENT_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ card->evt_buf_list[i] = NULL;
+ memset(desc, 0, sizeof(*desc));
+ }
+
+ return;
+}
+
+/* This function creates buffer descriptor ring for TX
+ */
+static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
/*
 	 * driver maintains the write pointer and firmware maintains the read
* pointer. The write pointer starts at 0 (zero) while the read pointer
* starts at zero with rollover bit set
*/
card->txbd_wrptr = 0;
- card->txbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+
+ if (reg->pfu_enabled)
+ card->txbd_rdptr = 0;
+ else
+ card->txbd_rdptr |= reg->tx_rollover_ind;
 	/* allocate shared memory for the BD ring and divide it into
 	   several descriptors */
- card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
- MWIFIEX_MAX_TXRX_BD;
+ if (reg->pfu_enabled)
+ card->txbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) *
+ MWIFIEX_MAX_TXRX_BD;
+ else
+ card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
+ MWIFIEX_MAX_TXRX_BD;
+
dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n",
card->txbd_ring_size);
card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
@@ -399,40 +658,15 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
(u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size);
- for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
- card->txbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
- (card->txbd_ring_vbase +
- (sizeof(struct mwifiex_pcie_buf_desc)
- * i));
-
- card->tx_buf_list[i] = NULL;
- card->txbd_ring[i]->paddr = 0;
- card->txbd_ring[i]->len = 0;
- card->txbd_ring[i]->flags = 0;
- }
-
- return 0;
+ return mwifiex_init_txq_ring(adapter);
}
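
The rollover flag carried in txbd_rdptr/txbd_wrptr (initialized above and tested again in mwifiex_pcie_send_data_complete() further down) is what lets the driver tell a full ring from an empty one when the masked indexes are equal; a standalone sketch with stand-in constants, not part of the patch:

        #define NUM_BD          32              /* stand-in for MWIFIEX_MAX_TXRX_BD */
        #define BD_MASK         0x0000003f      /* stand-in for reg->tx_mask        */
        #define ROLLOVER_IND    0x00000080      /* stand-in for tx_rollover_ind     */

        /* Bump a ring pointer; on wrap, reset the index and toggle the
         * rollover flag, mirroring the rdptr handling in this patch. */
        static unsigned int ring_advance(unsigned int ptr)
        {
                ptr++;
                if ((ptr & BD_MASK) == NUM_BD)
                        ptr = (ptr & ROLLOVER_IND) ^ ROLLOVER_IND;
                return ptr;
        }

        /* Same index and same rollover flag: nothing to consume. */
        static int ring_empty(unsigned int rdptr, unsigned int wrptr)
        {
                return rdptr == wrptr;
        }

        /* Same index but different rollover flag: the writer has lapped
         * the reader, so every descriptor is in use. */
        static int ring_full(unsigned int rdptr, unsigned int wrptr)
        {
                return ((rdptr & BD_MASK) == (wrptr & BD_MASK)) &&
                       ((rdptr & ROLLOVER_IND) != (wrptr & ROLLOVER_IND));
        }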
static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- struct sk_buff *skb;
- int i;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
- if (card->tx_buf_list[i]) {
- skb = card->tx_buf_list[i];
- pci_unmap_single(card->dev, card->txbd_ring[i]->paddr,
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_any(skb);
- }
- card->tx_buf_list[i] = NULL;
- card->txbd_ring[i]->paddr = 0;
- card->txbd_ring[i]->len = 0;
- card->txbd_ring[i]->flags = 0;
- card->txbd_ring[i] = NULL;
- }
+ mwifiex_cleanup_txq_ring(adapter);
if (card->txbd_ring_vbase)
pci_free_consistent(card->dev, card->txbd_ring_size,
@@ -440,7 +674,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
card->txbd_ring_pbase);
card->txbd_ring_size = 0;
card->txbd_wrptr = 0;
- card->txbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->txbd_rdptr = 0 | reg->tx_rollover_ind;
card->txbd_ring_vbase = NULL;
card->txbd_ring_pbase = 0;
@@ -453,9 +687,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- struct sk_buff *skb;
- int i;
- dma_addr_t buf_pa;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
/*
 	 * driver maintains the read pointer and firmware maintains the write
@@ -463,10 +695,15 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
* starts at zero with rollover bit set
*/
card->rxbd_wrptr = 0;
- card->rxbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->rxbd_rdptr = reg->rx_rollover_ind;
+
+ if (reg->pfu_enabled)
+ card->rxbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) *
+ MWIFIEX_MAX_TXRX_BD;
+ else
+ card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
+ MWIFIEX_MAX_TXRX_BD;
- card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
- MWIFIEX_MAX_TXRX_BD;
dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n",
card->rxbd_ring_size);
card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
@@ -485,39 +722,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
(u32)((u64)card->rxbd_ring_pbase >> 32),
card->rxbd_ring_size);
- for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
- card->rxbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
- (card->rxbd_ring_vbase +
- (sizeof(struct mwifiex_pcie_buf_desc)
- * i));
-
- /* Allocate skb here so that firmware can DMA data from it */
- skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
- if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for RX ring.\n");
- kfree(card->rxbd_ring_vbase);
- return -ENOMEM;
- }
- if (mwifiex_map_pci_memory(adapter, skb,
- MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE))
- return -1;
-
- MWIFIEX_SKB_PACB(skb, &buf_pa);
-
- dev_dbg(adapter->dev, "info: RX ring: add new skb base: %p, "
- "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n",
- skb, skb->data, (u32)buf_pa, (u32)((u64)buf_pa >> 32),
- skb->len);
-
- card->rx_buf_list[i] = skb;
- card->rxbd_ring[i]->paddr = buf_pa;
- card->rxbd_ring[i]->len = (u16)skb->len;
- card->rxbd_ring[i]->flags = 0;
- }
-
- return 0;
+ return mwifiex_init_rxq_ring(adapter);
}
/*
@@ -526,23 +731,9 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- struct sk_buff *skb;
- int i;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
- if (card->rx_buf_list[i]) {
- skb = card->rx_buf_list[i];
- pci_unmap_single(card->dev, card->rxbd_ring[i]->paddr ,
- MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any(skb);
- }
- card->rx_buf_list[i] = NULL;
- card->rxbd_ring[i]->paddr = 0;
- card->rxbd_ring[i]->len = 0;
- card->rxbd_ring[i]->flags = 0;
- card->rxbd_ring[i] = NULL;
- }
+ mwifiex_cleanup_rxq_ring(adapter);
if (card->rxbd_ring_vbase)
pci_free_consistent(card->dev, card->rxbd_ring_size,
@@ -550,7 +741,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
card->rxbd_ring_pbase);
card->rxbd_ring_size = 0;
card->rxbd_wrptr = 0;
- card->rxbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->rxbd_rdptr = 0 | reg->rx_rollover_ind;
card->rxbd_ring_vbase = NULL;
card->rxbd_ring_pbase = 0;
@@ -563,9 +754,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- struct sk_buff *skb;
- int i;
- dma_addr_t buf_pa;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
/*
 	 * driver maintains the read pointer and firmware maintains the write
@@ -573,10 +762,11 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
* starts at zero with rollover bit set
*/
card->evtbd_wrptr = 0;
- card->evtbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->evtbd_rdptr = reg->evt_rollover_ind;
+
+ card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) *
+ MWIFIEX_MAX_EVT_BD;
- card->evtbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
- MWIFIEX_MAX_EVT_BD;
dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n",
card->evtbd_ring_size);
card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
@@ -595,39 +785,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
(u32)((u64)card->evtbd_ring_pbase >> 32),
card->evtbd_ring_size);
- for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
- card->evtbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
- (card->evtbd_ring_vbase +
- (sizeof(struct mwifiex_pcie_buf_desc)
- * i));
-
- /* Allocate skb here so that firmware can DMA data from it */
- skb = dev_alloc_skb(MAX_EVENT_SIZE);
- if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for EVENT buf.\n");
- kfree(card->evtbd_ring_vbase);
- return -ENOMEM;
- }
- skb_put(skb, MAX_EVENT_SIZE);
-
- if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE))
- return -1;
-
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- dev_dbg(adapter->dev, "info: Evt ring: add new skb. base: %p, "
- "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n",
- skb, skb->data, (u32)buf_pa, (u32)((u64)buf_pa >> 32),
- skb->len);
-
- card->evt_buf_list[i] = skb;
- card->evtbd_ring[i]->paddr = buf_pa;
- card->evtbd_ring[i]->len = (u16)skb->len;
- card->evtbd_ring[i]->flags = 0;
- }
-
- return 0;
+ return mwifiex_pcie_init_evt_ring(adapter);
}
/*
@@ -636,29 +794,16 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- struct sk_buff *skb;
- int i;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
- if (card->evt_buf_list[i]) {
- skb = card->evt_buf_list[i];
- pci_unmap_single(card->dev, card->evtbd_ring[i]->paddr,
- MAX_EVENT_SIZE, PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any(skb);
- }
- card->evt_buf_list[i] = NULL;
- card->evtbd_ring[i]->paddr = 0;
- card->evtbd_ring[i]->len = 0;
- card->evtbd_ring[i]->flags = 0;
- card->evtbd_ring[i] = NULL;
- }
+ mwifiex_cleanup_evt_ring(adapter);
if (card->evtbd_ring_vbase)
pci_free_consistent(card->dev, card->evtbd_ring_size,
card->evtbd_ring_vbase,
card->evtbd_ring_pbase);
card->evtbd_wrptr = 0;
- card->evtbd_rdptr = 0 | MWIFIEX_BD_FLAG_ROLLOVER_IND;
+ card->evtbd_rdptr = 0 | reg->evt_rollover_ind;
card->evtbd_ring_size = 0;
card->evtbd_ring_vbase = NULL;
card->evtbd_ring_pbase = 0;
@@ -771,12 +916,13 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
u32 rdptr;
/* Read the TX ring read pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_TXBD_RDPTR, &rdptr)) {
+ if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
dev_err(adapter->dev,
- "Flush TXBD: failed to read REG_TXBD_RDPTR\n");
+ "Flush TXBD: failed to read reg->tx_rdptr\n");
return -1;
}
@@ -800,31 +946,35 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
*/
static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
{
- const u32 num_tx_buffs = MWIFIEX_MAX_TXRX_BD;
struct sk_buff *skb;
dma_addr_t buf_pa;
- u32 wrdoneidx, rdptr, unmap_count = 0;
+ u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (!mwifiex_pcie_ok_to_access_hw(adapter))
mwifiex_pm_wakeup_card(adapter);
/* Read the TX ring read pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_TXBD_RDPTR, &rdptr)) {
+ if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
dev_err(adapter->dev,
- "SEND COMP: failed to read REG_TXBD_RDPTR\n");
+ "SEND COMP: failed to read reg->tx_rdptr\n");
return -1;
}
dev_dbg(adapter->dev, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
card->txbd_rdptr, rdptr);
+ num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
/* free from previous txbd_rdptr to current txbd_rdptr */
- while (((card->txbd_rdptr & MWIFIEX_TXBD_MASK) !=
- (rdptr & MWIFIEX_TXBD_MASK)) ||
- ((card->txbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
- (rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
- wrdoneidx = card->txbd_rdptr & MWIFIEX_TXBD_MASK;
+ while (((card->txbd_rdptr & reg->tx_mask) !=
+ (rdptr & reg->tx_mask)) ||
+ ((card->txbd_rdptr & reg->tx_rollover_ind) !=
+ (rdptr & reg->tx_rollover_ind))) {
+ wrdoneidx = (card->txbd_rdptr & reg->tx_mask) >>
+ reg->tx_start_ptr;
skb = card->tx_buf_list[wrdoneidx];
if (skb) {
@@ -845,25 +995,38 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
}
card->tx_buf_list[wrdoneidx] = NULL;
- card->txbd_ring[wrdoneidx]->paddr = 0;
- card->rxbd_ring[wrdoneidx]->len = 0;
- card->rxbd_ring[wrdoneidx]->flags = 0;
- card->txbd_rdptr++;
- if ((card->txbd_rdptr & MWIFIEX_TXBD_MASK) == num_tx_buffs)
+ if (reg->pfu_enabled) {
+ desc2 = (void *)card->txbd_ring[wrdoneidx];
+ memset(desc2, 0, sizeof(*desc2));
+ } else {
+ desc = card->txbd_ring[wrdoneidx];
+ memset(desc, 0, sizeof(*desc));
+ }
+ switch (card->dev->device) {
+ case PCIE_DEVICE_ID_MARVELL_88W8766P:
+ card->txbd_rdptr++;
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8897:
+ card->txbd_rdptr += reg->ring_tx_start_ptr;
+ break;
+ }
+
+ if ((card->txbd_rdptr & reg->tx_mask) == num_tx_buffs)
card->txbd_rdptr = ((card->txbd_rdptr &
- MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
- MWIFIEX_BD_FLAG_ROLLOVER_IND);
+ reg->tx_rollover_ind) ^
+ reg->tx_rollover_ind);
}
if (unmap_count)
adapter->data_sent = false;
if (card->txbd_flush) {
- if (((card->txbd_wrptr & MWIFIEX_TXBD_MASK) ==
- (card->txbd_rdptr & MWIFIEX_TXBD_MASK)) &&
- ((card->txbd_wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
- (card->txbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND)))
+ if (((card->txbd_wrptr & reg->tx_mask) ==
+ (card->txbd_rdptr & reg->tx_mask)) &&
+ ((card->txbd_wrptr & reg->tx_rollover_ind) !=
+ (card->txbd_rdptr & reg->tx_rollover_ind)))
card->txbd_flush = 0;
else
mwifiex_clean_pcie_ring_buf(adapter);
@@ -883,9 +1046,12 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
struct mwifiex_tx_param *tx_param)
{
struct pcie_service_card *card = adapter->card;
- u32 wrindx;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ u32 wrindx, num_tx_buffs, rx_val;
int ret;
dma_addr_t buf_pa;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
__le16 *tmp;
if (!(skb->data && skb->len)) {
@@ -897,6 +1063,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
if (!mwifiex_pcie_ok_to_access_hw(adapter))
mwifiex_pm_wakeup_card(adapter);
+ num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
card->txbd_rdptr, card->txbd_wrptr);
if (mwifiex_pcie_txbd_not_full(card)) {
@@ -913,25 +1080,46 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
PCI_DMA_TODEVICE))
return -1;
- wrindx = card->txbd_wrptr & MWIFIEX_TXBD_MASK;
+ wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
MWIFIEX_SKB_PACB(skb, &buf_pa);
card->tx_buf_list[wrindx] = skb;
- card->txbd_ring[wrindx]->paddr = buf_pa;
- card->txbd_ring[wrindx]->len = (u16)skb->len;
- card->txbd_ring[wrindx]->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
- MWIFIEX_BD_FLAG_LAST_DESC;
- if ((++card->txbd_wrptr & MWIFIEX_TXBD_MASK) ==
- MWIFIEX_MAX_TXRX_BD)
+ if (reg->pfu_enabled) {
+ desc2 = (void *)card->txbd_ring[wrindx];
+ desc2->paddr = buf_pa;
+ desc2->len = (u16)skb->len;
+ desc2->frag_len = (u16)skb->len;
+ desc2->offset = 0;
+ desc2->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
+ MWIFIEX_BD_FLAG_LAST_DESC;
+ } else {
+ desc = card->txbd_ring[wrindx];
+ desc->paddr = buf_pa;
+ desc->len = (u16)skb->len;
+ desc->flags = MWIFIEX_BD_FLAG_FIRST_DESC |
+ MWIFIEX_BD_FLAG_LAST_DESC;
+ }
+
+ switch (card->dev->device) {
+ case PCIE_DEVICE_ID_MARVELL_88W8766P:
+ card->txbd_wrptr++;
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8897:
+ card->txbd_wrptr += reg->ring_tx_start_ptr;
+ break;
+ }
+
+ if ((card->txbd_wrptr & reg->tx_mask) == num_tx_buffs)
card->txbd_wrptr = ((card->txbd_wrptr &
- MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
- MWIFIEX_BD_FLAG_ROLLOVER_IND);
+ reg->tx_rollover_ind) ^
+ reg->tx_rollover_ind);
- /* Write the TX ring write pointer in to REG_TXBD_WRPTR */
- if (mwifiex_write_reg(adapter, REG_TXBD_WRPTR,
- card->txbd_wrptr)) {
+ rx_val = card->rxbd_rdptr & reg->rx_wrap_mask;
+ /* Write the TX ring write pointer in to reg->tx_wrptr */
+ if (mwifiex_write_reg(adapter, reg->tx_wrptr,
+ card->txbd_wrptr | rx_val)) {
dev_err(adapter->dev,
- "SEND DATA: failed to write REG_TXBD_WRPTR\n");
+ "SEND DATA: failed to write reg->tx_wrptr\n");
ret = -1;
goto done_unmap;
}
@@ -971,9 +1159,11 @@ done_unmap:
MWIFIEX_SKB_PACB(skb, &buf_pa);
pci_unmap_single(card->dev, buf_pa, skb->len, PCI_DMA_TODEVICE);
card->tx_buf_list[wrindx] = NULL;
- card->txbd_ring[wrindx]->paddr = 0;
- card->txbd_ring[wrindx]->len = 0;
- card->txbd_ring[wrindx]->flags = 0;
+ if (reg->pfu_enabled)
+ memset(desc2, 0, sizeof(*desc2));
+ else
+ memset(desc, 0, sizeof(*desc));
+
return ret;
}
@@ -984,32 +1174,35 @@ done_unmap:
static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- u32 wrptr, rd_index;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ u32 wrptr, rd_index, tx_val;
dma_addr_t buf_pa;
int ret = 0;
struct sk_buff *skb_tmp = NULL;
+ struct mwifiex_pcie_buf_desc *desc;
+ struct mwifiex_pfu_buf_desc *desc2;
if (!mwifiex_pcie_ok_to_access_hw(adapter))
mwifiex_pm_wakeup_card(adapter);
/* Read the RX ring Write pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) {
+ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
dev_err(adapter->dev,
- "RECV DATA: failed to read REG_TXBD_RDPTR\n");
+ "RECV DATA: failed to read reg->rx_wrptr\n");
ret = -1;
goto done;
}
card->rxbd_wrptr = wrptr;
- while (((wrptr & MWIFIEX_RXBD_MASK) !=
- (card->rxbd_rdptr & MWIFIEX_RXBD_MASK)) ||
- ((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) ==
- (card->rxbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
+ while (((wrptr & reg->rx_mask) !=
+ (card->rxbd_rdptr & reg->rx_mask)) ||
+ ((wrptr & reg->rx_rollover_ind) ==
+ (card->rxbd_rdptr & reg->rx_rollover_ind))) {
struct sk_buff *skb_data;
u16 rx_len;
__le16 pkt_len;
- rd_index = card->rxbd_rdptr & MWIFIEX_RXBD_MASK;
+ rd_index = card->rxbd_rdptr & reg->rx_mask;
skb_data = card->rx_buf_list[rd_index];
MWIFIEX_SKB_PACB(skb_data, &buf_pa);
@@ -1047,32 +1240,44 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
"RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
skb_tmp, rd_index);
card->rx_buf_list[rd_index] = skb_tmp;
- card->rxbd_ring[rd_index]->paddr = buf_pa;
- card->rxbd_ring[rd_index]->len = skb_tmp->len;
- card->rxbd_ring[rd_index]->flags = 0;
- if ((++card->rxbd_rdptr & MWIFIEX_RXBD_MASK) ==
+ if (reg->pfu_enabled) {
+ desc2 = (void *)card->rxbd_ring[rd_index];
+ desc2->paddr = buf_pa;
+ desc2->len = skb_tmp->len;
+ desc2->frag_len = skb_tmp->len;
+ desc2->offset = 0;
+ desc2->flags = reg->ring_flag_sop | reg->ring_flag_eop;
+ } else {
+ desc = card->rxbd_ring[rd_index];
+ desc->paddr = buf_pa;
+ desc->len = skb_tmp->len;
+ desc->flags = 0;
+ }
+
+ if ((++card->rxbd_rdptr & reg->rx_mask) ==
MWIFIEX_MAX_TXRX_BD) {
card->rxbd_rdptr = ((card->rxbd_rdptr &
- MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
- MWIFIEX_BD_FLAG_ROLLOVER_IND);
+ reg->rx_rollover_ind) ^
+ reg->rx_rollover_ind);
}
dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
card->rxbd_rdptr, wrptr);
- /* Write the RX ring read pointer in to REG_RXBD_RDPTR */
- if (mwifiex_write_reg(adapter, REG_RXBD_RDPTR,
- card->rxbd_rdptr)) {
+ tx_val = card->txbd_wrptr & reg->tx_wrap_mask;
+ /* Write the RX ring read pointer in to reg->rx_rdptr */
+ if (mwifiex_write_reg(adapter, reg->rx_rdptr,
+ card->rxbd_rdptr | tx_val)) {
dev_err(adapter->dev,
- "RECV DATA: failed to write REG_RXBD_RDPTR\n");
+ "RECV DATA: failed to write reg->rx_rdptr\n");
ret = -1;
goto done;
}
/* Read the RX ring Write pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) {
+ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
dev_err(adapter->dev,
- "RECV DATA: failed to read REG_TXBD_RDPTR\n");
+ "RECV DATA: failed to read reg->rx_wrptr\n");
ret = -1;
goto done;
}
@@ -1093,6 +1298,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
{
dma_addr_t buf_pa;
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (!(skb->data && skb->len)) {
dev_err(adapter->dev,
@@ -1106,9 +1312,10 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
MWIFIEX_SKB_PACB(skb, &buf_pa);
- /* Write the lower 32bits of the physical address to scratch
- * register 0 */
- if (mwifiex_write_reg(adapter, PCIE_SCRATCH_0_REG, (u32)buf_pa)) {
+ /* Write the lower 32bits of the physical address to low command
+ * address scratch register
+ */
+ if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) {
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
@@ -1117,9 +1324,10 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -1;
}
- /* Write the upper 32bits of the physical address to scratch
- * register 1 */
- if (mwifiex_write_reg(adapter, PCIE_SCRATCH_1_REG,
+ /* Write the upper 32bits of the physical address to high command
+ * address scratch register
+ */
+ if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
(u32)((u64)buf_pa >> 32))) {
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
@@ -1129,10 +1337,10 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -1;
}
- /* Write the command length to scratch register 2 */
- if (mwifiex_write_reg(adapter, PCIE_SCRATCH_2_REG, skb->len)) {
+ /* Write the command length to cmd_size scratch register */
+ if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) {
dev_err(adapter->dev,
- "%s: failed to write command len to scratch reg 2\n",
+ "%s: failed to write command len to cmd_size scratch reg\n",
__func__);
pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
PCI_DMA_TODEVICE);
@@ -1158,11 +1366,14 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ int tx_wrap = card->txbd_wrptr & reg->tx_wrap_mask;
- /* Write the RX ring read pointer in to REG_RXBD_RDPTR */
- if (mwifiex_write_reg(adapter, REG_RXBD_RDPTR, card->rxbd_rdptr | 0)) {
+ /* Write the RX ring read pointer in to reg->rx_rdptr */
+ if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr |
+ tx_wrap)) {
dev_err(adapter->dev,
- "RECV DATA: failed to write REG_RXBD_RDPTR\n");
+ "RECV DATA: failed to write reg->rx_rdptr\n");
return -1;
}
return 0;
@@ -1174,6 +1385,7 @@ static int
mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
int ret = 0;
dma_addr_t cmd_buf_pa, cmdrsp_buf_pa;
u8 *payload = (u8 *)skb->data;
@@ -1206,7 +1418,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* To send a command, the driver will:
1. Write the 64bit physical address of the data buffer to
- SCRATCH1 + SCRATCH0
+ cmd response address low + cmd response address high
2. Ring the door bell (i.e. set the door bell interrupt)
In response to door bell interrupt, the firmware will perform
@@ -1218,7 +1430,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
MWIFIEX_SKB_PACB(card->cmdrsp_buf, &cmdrsp_buf_pa);
/* Write the lower 32bits of the cmdrsp buffer physical
address */
- if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO,
+ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
(u32)cmdrsp_buf_pa)) {
dev_err(adapter->dev,
"Failed to write download cmd to boot code.\n");
@@ -1227,7 +1439,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
}
/* Write the upper 32bits of the cmdrsp buffer physical
address */
- if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI,
+ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi,
(u32)((u64)cmdrsp_buf_pa >> 32))) {
dev_err(adapter->dev,
"Failed to write download cmd to boot code.\n");
@@ -1237,15 +1449,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
}
MWIFIEX_SKB_PACB(card->cmd_buf, &cmd_buf_pa);
- /* Write the lower 32bits of the physical address to REG_CMD_ADDR_LO */
- if (mwifiex_write_reg(adapter, REG_CMD_ADDR_LO, (u32)cmd_buf_pa)) {
+ /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
+ if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
+ (u32)cmd_buf_pa)) {
dev_err(adapter->dev,
"Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
- /* Write the upper 32bits of the physical address to REG_CMD_ADDR_HI */
- if (mwifiex_write_reg(adapter, REG_CMD_ADDR_HI,
+ /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */
+ if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
(u32)((u64)cmd_buf_pa >> 32))) {
dev_err(adapter->dev,
"Failed to write download cmd to boot code.\n");
@@ -1253,10 +1466,11 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
goto done;
}
- /* Write the command length to REG_CMD_SIZE */
- if (mwifiex_write_reg(adapter, REG_CMD_SIZE, card->cmd_buf->len)) {
+ /* Write the command length to reg->cmd_size */
+ if (mwifiex_write_reg(adapter, reg->cmd_size,
+ card->cmd_buf->len)) {
dev_err(adapter->dev,
- "Failed to write cmd len to REG_CMD_SIZE\n");
+ "Failed to write cmd len to reg->cmd_size\n");
ret = -1;
goto done;
}
@@ -1283,6 +1497,7 @@ done:
static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
struct sk_buff *skb = card->cmdrsp_buf;
int count = 0;
u16 rx_len;
@@ -1304,8 +1519,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(adapter, skb->data,
skb->len);
- while (mwifiex_pcie_ok_to_access_hw(adapter) &&
- (count++ < 10))
+ while (reg->sleep_cookie && (count++ < 10) &&
+ mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
} else {
dev_err(adapter->dev,
@@ -1328,14 +1543,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
/* Clear the cmd-rsp buffer address in scratch registers. This
will prevent firmware from writing to the same response
buffer again. */
- if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO, 0)) {
+ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) {
dev_err(adapter->dev,
"cmd_done: failed to clear cmd_rsp_addr_lo\n");
return -1;
}
/* Write the upper 32bits of the cmdrsp buffer physical
address */
- if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI, 0)) {
+ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) {
dev_err(adapter->dev,
"cmd_done: failed to clear cmd_rsp_addr_hi\n");
return -1;
@@ -1380,9 +1595,11 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr, event;
dma_addr_t buf_pa;
+ struct mwifiex_evt_buf_desc *desc;
if (!mwifiex_pcie_ok_to_access_hw(adapter))
mwifiex_pm_wakeup_card(adapter);
@@ -1399,9 +1616,9 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
}
/* Read the event ring write pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
+ if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
dev_err(adapter->dev,
- "EventReady: failed to read REG_EVTBD_WRPTR\n");
+ "EventReady: failed to read reg->evt_wrptr\n");
return -1;
}
@@ -1409,8 +1626,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
card->evtbd_rdptr, wrptr);
if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr
& MWIFIEX_EVTBD_MASK)) ||
- ((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) ==
- (card->evtbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
+ ((wrptr & reg->evt_rollover_ind) ==
+ (card->evtbd_rdptr & reg->evt_rollover_ind))) {
struct sk_buff *skb_cmd;
__le16 data_len = 0;
u16 evt_len;
@@ -1424,9 +1641,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
/* Take the pointer and set it to event pointer in adapter
and will return back after event handling callback */
card->evt_buf_list[rdptr] = NULL;
- card->evtbd_ring[rdptr]->paddr = 0;
- card->evtbd_ring[rdptr]->len = 0;
- card->evtbd_ring[rdptr]->flags = 0;
+ desc = card->evtbd_ring[rdptr];
+ memset(desc, 0, sizeof(*desc));
event = *(u32 *) &skb_cmd->data[INTF_HEADER_LEN];
adapter->event_cause = event;
@@ -1462,10 +1678,12 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
int ret = 0;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr;
dma_addr_t buf_pa;
+ struct mwifiex_evt_buf_desc *desc;
if (!skb)
return 0;
@@ -1477,9 +1695,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
}
/* Read the event ring write pointer set by firmware */
- if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
+ if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
dev_err(adapter->dev,
- "event_complete: failed to read REG_EVTBD_WRPTR\n");
+ "event_complete: failed to read reg->evt_wrptr\n");
return -1;
}
@@ -1492,9 +1710,10 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
MWIFIEX_SKB_PACB(skb, &buf_pa);
card->evt_buf_list[rdptr] = skb;
MWIFIEX_SKB_PACB(skb, &buf_pa);
- card->evtbd_ring[rdptr]->paddr = buf_pa;
- card->evtbd_ring[rdptr]->len = (u16)skb->len;
- card->evtbd_ring[rdptr]->flags = 0;
+ desc = card->evtbd_ring[rdptr];
+ desc->paddr = buf_pa;
+ desc->len = (u16)skb->len;
+ desc->flags = 0;
skb = NULL;
} else {
dev_dbg(adapter->dev,
@@ -1504,17 +1723,18 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) {
card->evtbd_rdptr = ((card->evtbd_rdptr &
- MWIFIEX_BD_FLAG_ROLLOVER_IND) ^
- MWIFIEX_BD_FLAG_ROLLOVER_IND);
+ reg->evt_rollover_ind) ^
+ reg->evt_rollover_ind);
}
dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>",
card->evtbd_rdptr, wrptr);
- /* Write the event ring read pointer in to REG_EVTBD_RDPTR */
- if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
+ /* Write the event ring read pointer in to reg->evt_rdptr */
+ if (mwifiex_write_reg(adapter, reg->evt_rdptr,
+ card->evtbd_rdptr)) {
dev_err(adapter->dev,
- "event_complete: failed to read REG_EVTBD_RDPTR\n");
+ "event_complete: failed to read reg->evt_rdptr\n");
return -1;
}
@@ -1543,6 +1763,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
u32 block_retry_cnt = 0;
dma_addr_t buf_pa;
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (!firmware || !firmware_len) {
dev_err(adapter->dev,
@@ -1574,7 +1795,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
break;
for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
- ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_2_REG,
+ ret = mwifiex_read_reg(adapter, reg->cmd_size,
&len);
if (ret) {
dev_warn(adapter->dev,
@@ -1620,16 +1841,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, ".");
- tx_blocks = (txlen +
- MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD - 1) /
- MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD;
+ tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
+ card->pcie.blksz_fw_dl;
/* Copy payload to buffer */
memmove(skb->data, &firmware[offset], txlen);
}
skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
- skb_trim(skb, tx_blocks * MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD);
+ skb_trim(skb, tx_blocks * card->pcie.blksz_fw_dl);
/* Send the boot command to device */
if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
@@ -1682,6 +1902,8 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
{
int ret = 0;
u32 firmware_stat, winner_status;
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
u32 tries;
/* Mask spurious interrupts */
@@ -1692,7 +1914,8 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
}
dev_dbg(adapter->dev, "Setting driver ready signature\n");
- if (mwifiex_write_reg(adapter, REG_DRV_READY, FIRMWARE_READY_PCIE)) {
+ if (mwifiex_write_reg(adapter, reg->drv_rdy,
+ FIRMWARE_READY_PCIE)) {
dev_err(adapter->dev,
"Failed to write driver ready signature\n");
return -1;
@@ -1700,7 +1923,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
/* Wait for firmware initialization event */
for (tries = 0; tries < poll_num; tries++) {
- if (mwifiex_read_reg(adapter, PCIE_SCRATCH_3_REG,
+ if (mwifiex_read_reg(adapter, reg->fw_status,
&firmware_stat))
ret = -1;
else
@@ -1717,7 +1940,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
}
if (ret) {
- if (mwifiex_read_reg(adapter, PCIE_SCRATCH_3_REG,
+ if (mwifiex_read_reg(adapter, reg->fw_status,
&winner_status))
ret = -1;
else if (!winner_status) {
@@ -1955,6 +2178,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
struct pcie_service_card *card = adapter->card;
int ret;
struct pci_dev *pdev = card->dev;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
pci_set_drvdata(pdev, card);
@@ -1985,6 +2209,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
card->pci_mmap = pci_iomap(pdev, 0, 0);
if (!card->pci_mmap) {
dev_err(adapter->dev, "iomap(0) error\n");
+ ret = -EIO;
goto err_iomap0;
}
ret = pci_request_region(pdev, 2, DRV_NAME);
@@ -1995,6 +2220,7 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
card->pci_mmap1 = pci_iomap(pdev, 2, 0);
if (!card->pci_mmap1) {
dev_err(adapter->dev, "iomap(2) error\n");
+ ret = -EIO;
goto err_iomap2;
}
@@ -2015,10 +2241,13 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
if (ret)
goto err_alloc_cmdbuf;
- ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
- if (ret)
- goto err_alloc_cookie;
-
+ if (reg->sleep_cookie) {
+ ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
+ if (ret)
+ goto err_alloc_cookie;
+ } else {
+ card->sleep_cookie_vbase = NULL;
+ }
return ret;
err_alloc_cookie:
@@ -2059,10 +2288,11 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
struct pci_dev *pdev = card->dev;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (user_rmmod) {
dev_dbg(adapter->dev, "Clearing driver ready signature\n");
- if (mwifiex_write_reg(adapter, REG_DRV_READY, 0x00000000))
+ if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
dev_err(adapter->dev,
"Failed to write driver not-ready signature\n");
}
@@ -2100,7 +2330,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
}
adapter->dev = &pdev->dev;
- strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME);
+ strcpy(adapter->fw_name, card->pcie.firmware);
return 0;
}
@@ -2114,12 +2344,16 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg;
if (card) {
dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__);
free_irq(card->dev->irq, card->dev);
- mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+ reg = card->pcie.reg;
+ if (reg->sleep_cookie)
+ mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+
mwifiex_pcie_delete_cmdrsp_buf(adapter);
mwifiex_pcie_delete_evtbd_ring(adapter);
mwifiex_pcie_delete_rxbd_ring(adapter);
@@ -2160,7 +2394,7 @@ static int mwifiex_pcie_init_module(void)
{
int ret;
- pr_debug("Marvell 8766 PCIe Driver\n");
+ pr_debug("Marvell PCIe Driver\n");
sema_init(&add_remove_card_sem, 1);
@@ -2203,4 +2437,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
MODULE_VERSION(PCIE_VERSION);
MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/pcie8766_uapsta.bin");
+MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
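
A minimal sketch of the pointer arithmetic used throughout the hunks above, assuming the 8897 layout defined in pcie.h further below (tx_mask = 0x03FF0000, tx_start_ptr = 16, rollover = BIT(26)): on that chip the TX ring index shares a combined data-pointer register with the RX index, so the driver masks, shifts by tx_start_ptr to recover a ring slot, and toggles a rollover bit when the index wraps. This is standalone user-space C for illustration only; the constants are copied in as plain defines rather than taken from the driver headers.

/*
 * Sketch: replay the 8897 TX read-pointer handling from
 * mwifiex_pcie_send_data_complete() with local constants.
 */
#include <stdint.h>
#include <stdio.h>

#define TX_MASK		0x03FF0000u		/* reg->tx_mask (8897) */
#define TX_START_PTR	16			/* reg->tx_start_ptr */
#define TX_ROLLOVER_IND	(1u << 26)		/* MWIFIEX_BD_FLAG_TX_ROLLOVER_IND */
#define NUM_TX_BUFFS	(0x20u << TX_START_PTR)	/* MWIFIEX_MAX_TXRX_BD << tx_start_ptr */

static unsigned int ring_index(uint32_t ptr)
{
	/* wrdoneidx = (rdptr & reg->tx_mask) >> reg->tx_start_ptr */
	return (ptr & TX_MASK) >> TX_START_PTR;
}

static uint32_t advance(uint32_t ptr)
{
	ptr += 1u << TX_START_PTR;		/* 8897 steps by ring_tx_start_ptr */
	if ((ptr & TX_MASK) == NUM_TX_BUFFS)	/* index wrapped: clear it, toggle rollover */
		ptr = (ptr & TX_ROLLOVER_IND) ^ TX_ROLLOVER_IND;
	return ptr;
}

int main(void)
{
	uint32_t rdptr = 0;
	int i;

	/* Walk one full ring plus two slots: index ends at 2, rollover set. */
	for (i = 0; i < 0x22; i++)
		rdptr = advance(rdptr);

	printf("index=%u rollover=%d\n",
	       ring_index(rdptr), !!(rdptr & TX_ROLLOVER_IND));
	return 0;
}
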
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 37eeb2ca6b2..d322ab8604e 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -29,6 +29,11 @@
#include "main.h"
#define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
+#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
+
+#define PCIE_VENDOR_ID_MARVELL (0x11ab)
+#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30)
+#define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38)
/* Constants for Buffer Descriptor (BD) rings */
#define MWIFIEX_MAX_TXRX_BD 0x20
@@ -57,6 +62,8 @@
#define PCIE_SCRATCH_10_REG 0xCE8
#define PCIE_SCRATCH_11_REG 0xCEC
#define PCIE_SCRATCH_12_REG 0xCF0
+#define PCIE_RD_DATA_PTR_Q0_Q1 0xC08C
+#define PCIE_WR_DATA_PTR_Q0_Q1 0xC05C
#define CPU_INTR_DNLD_RDY BIT(0)
#define CPU_INTR_DOOR_BELL BIT(1)
@@ -75,27 +82,14 @@
#define MWIFIEX_BD_FLAG_ROLLOVER_IND BIT(7)
#define MWIFIEX_BD_FLAG_FIRST_DESC BIT(0)
#define MWIFIEX_BD_FLAG_LAST_DESC BIT(1)
-#define REG_CMD_ADDR_LO PCIE_SCRATCH_0_REG
-#define REG_CMD_ADDR_HI PCIE_SCRATCH_1_REG
-#define REG_CMD_SIZE PCIE_SCRATCH_2_REG
-
-#define REG_CMDRSP_ADDR_LO PCIE_SCRATCH_4_REG
-#define REG_CMDRSP_ADDR_HI PCIE_SCRATCH_5_REG
-
-/* TX buffer description read pointer */
-#define REG_TXBD_RDPTR PCIE_SCRATCH_6_REG
-/* TX buffer description write pointer */
-#define REG_TXBD_WRPTR PCIE_SCRATCH_7_REG
-/* RX buffer description read pointer */
-#define REG_RXBD_RDPTR PCIE_SCRATCH_8_REG
-/* RX buffer description write pointer */
-#define REG_RXBD_WRPTR PCIE_SCRATCH_9_REG
-/* Event buffer description read pointer */
-#define REG_EVTBD_RDPTR PCIE_SCRATCH_10_REG
-/* Event buffer description write pointer */
-#define REG_EVTBD_WRPTR PCIE_SCRATCH_11_REG
-/* Driver ready signature write pointer */
-#define REG_DRV_READY PCIE_SCRATCH_12_REG
+#define MWIFIEX_BD_FLAG_SOP BIT(0)
+#define MWIFIEX_BD_FLAG_EOP BIT(1)
+#define MWIFIEX_BD_FLAG_XS_SOP BIT(2)
+#define MWIFIEX_BD_FLAG_XS_EOP BIT(3)
+#define MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND BIT(7)
+#define MWIFIEX_BD_FLAG_RX_ROLLOVER_IND BIT(10)
+#define MWIFIEX_BD_FLAG_TX_START_PTR BIT(16)
+#define MWIFIEX_BD_FLAG_TX_ROLLOVER_IND BIT(26)
/* Max retry number of command write */
#define MAX_WRITE_IOMEM_RETRY 2
@@ -104,15 +98,142 @@
/* FW awake cookie after FW ready */
#define FW_AWAKE_COOKIE (0xAA55AA55)
+struct mwifiex_pcie_card_reg {
+ u16 cmd_addr_lo;
+ u16 cmd_addr_hi;
+ u16 fw_status;
+ u16 cmd_size;
+ u16 cmdrsp_addr_lo;
+ u16 cmdrsp_addr_hi;
+ u16 tx_rdptr;
+ u16 tx_wrptr;
+ u16 rx_rdptr;
+ u16 rx_wrptr;
+ u16 evt_rdptr;
+ u16 evt_wrptr;
+ u16 drv_rdy;
+ u16 tx_start_ptr;
+ u32 tx_mask;
+ u32 tx_wrap_mask;
+ u32 rx_mask;
+ u32 rx_wrap_mask;
+ u32 tx_rollover_ind;
+ u32 rx_rollover_ind;
+ u32 evt_rollover_ind;
+ u8 ring_flag_sop;
+ u8 ring_flag_eop;
+ u8 ring_flag_xs_sop;
+ u8 ring_flag_xs_eop;
+ u32 ring_tx_start_ptr;
+ u8 pfu_enabled;
+ u8 sleep_cookie;
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = PCIE_SCRATCH_6_REG,
+ .tx_wrptr = PCIE_SCRATCH_7_REG,
+ .rx_rdptr = PCIE_SCRATCH_8_REG,
+ .rx_wrptr = PCIE_SCRATCH_9_REG,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 0,
+ .tx_mask = MWIFIEX_TXBD_MASK,
+ .tx_wrap_mask = 0,
+ .rx_mask = MWIFIEX_RXBD_MASK,
+ .rx_wrap_mask = 0,
+ .tx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .rx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .ring_flag_sop = 0,
+ .ring_flag_eop = 0,
+ .ring_flag_xs_sop = 0,
+ .ring_flag_xs_eop = 0,
+ .ring_tx_start_ptr = 0,
+ .pfu_enabled = 0,
+ .sleep_cookie = 1,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = PCIE_RD_DATA_PTR_Q0_Q1,
+ .tx_wrptr = PCIE_WR_DATA_PTR_Q0_Q1,
+ .rx_rdptr = PCIE_WR_DATA_PTR_Q0_Q1,
+ .rx_wrptr = PCIE_RD_DATA_PTR_Q0_Q1,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 16,
+ .tx_mask = 0x03FF0000,
+ .tx_wrap_mask = 0x07FF0000,
+ .rx_mask = 0x000003FF,
+ .rx_wrap_mask = 0x000007FF,
+ .tx_rollover_ind = MWIFIEX_BD_FLAG_TX_ROLLOVER_IND,
+ .rx_rollover_ind = MWIFIEX_BD_FLAG_RX_ROLLOVER_IND,
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+ .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+ .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+ .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+ .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+ .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+ .pfu_enabled = 1,
+ .sleep_cookie = 0,
+};
+
+struct mwifiex_pcie_device {
+ const char *firmware;
+ const struct mwifiex_pcie_card_reg *reg;
+ u16 blksz_fw_dl;
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
+ .firmware = PCIE8766_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_8766,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
+ .firmware = PCIE8897_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_8897,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+};
+
+struct mwifiex_evt_buf_desc {
+ u64 paddr;
+ u16 len;
+ u16 flags;
+} __packed;
+
struct mwifiex_pcie_buf_desc {
u64 paddr;
u16 len;
u16 flags;
} __packed;
+struct mwifiex_pfu_buf_desc {
+ u16 flags;
+ u16 offset;
+ u16 frag_len;
+ u16 len;
+ u64 paddr;
+ u32 reserved;
+} __packed;
+
struct pcie_service_card {
struct pci_dev *dev;
struct mwifiex_adapter *adapter;
+ struct mwifiex_pcie_device pcie;
u8 txbd_flush;
u32 txbd_wrptr;
@@ -120,7 +241,7 @@ struct pcie_service_card {
u32 txbd_ring_size;
u8 *txbd_ring_vbase;
dma_addr_t txbd_ring_pbase;
- struct mwifiex_pcie_buf_desc *txbd_ring[MWIFIEX_MAX_TXRX_BD];
+ void *txbd_ring[MWIFIEX_MAX_TXRX_BD];
struct sk_buff *tx_buf_list[MWIFIEX_MAX_TXRX_BD];
u32 rxbd_wrptr;
@@ -128,7 +249,7 @@ struct pcie_service_card {
u32 rxbd_ring_size;
u8 *rxbd_ring_vbase;
dma_addr_t rxbd_ring_pbase;
- struct mwifiex_pcie_buf_desc *rxbd_ring[MWIFIEX_MAX_TXRX_BD];
+ void *rxbd_ring[MWIFIEX_MAX_TXRX_BD];
struct sk_buff *rx_buf_list[MWIFIEX_MAX_TXRX_BD];
u32 evtbd_wrptr;
@@ -136,7 +257,7 @@ struct pcie_service_card {
u32 evtbd_ring_size;
u8 *evtbd_ring_vbase;
dma_addr_t evtbd_ring_pbase;
- struct mwifiex_pcie_buf_desc *evtbd_ring[MWIFIEX_MAX_EVT_BD];
+ void *evtbd_ring[MWIFIEX_MAX_EVT_BD];
struct sk_buff *evt_buf_list[MWIFIEX_MAX_EVT_BD];
struct sk_buff *cmd_buf;
@@ -150,11 +271,24 @@ struct pcie_service_card {
static inline int
mwifiex_pcie_txbd_empty(struct pcie_service_card *card, u32 rdptr)
{
- if (((card->txbd_wrptr & MWIFIEX_TXBD_MASK) ==
- (rdptr & MWIFIEX_TXBD_MASK)) &&
- ((card->txbd_wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
- (rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND)))
- return 1;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+ switch (card->dev->device) {
+ case PCIE_DEVICE_ID_MARVELL_88W8766P:
+ if (((card->txbd_wrptr & reg->tx_mask) ==
+ (rdptr & reg->tx_mask)) &&
+ ((card->txbd_wrptr & reg->tx_rollover_ind) !=
+ (rdptr & reg->tx_rollover_ind)))
+ return 1;
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8897:
+ if (((card->txbd_wrptr & reg->tx_mask) ==
+ (rdptr & reg->tx_mask)) &&
+ ((card->txbd_wrptr & reg->tx_rollover_ind) ==
+ (rdptr & reg->tx_rollover_ind)))
+ return 1;
+ break;
+ }
return 0;
}
@@ -162,11 +296,24 @@ mwifiex_pcie_txbd_empty(struct pcie_service_card *card, u32 rdptr)
static inline int
mwifiex_pcie_txbd_not_full(struct pcie_service_card *card)
{
- if (((card->txbd_wrptr & MWIFIEX_TXBD_MASK) !=
- (card->txbd_rdptr & MWIFIEX_TXBD_MASK)) ||
- ((card->txbd_wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
- (card->txbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND)))
- return 1;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+ switch (card->dev->device) {
+ case PCIE_DEVICE_ID_MARVELL_88W8766P:
+ if (((card->txbd_wrptr & reg->tx_mask) !=
+ (card->txbd_rdptr & reg->tx_mask)) ||
+ ((card->txbd_wrptr & reg->tx_rollover_ind) !=
+ (card->txbd_rdptr & reg->tx_rollover_ind)))
+ return 1;
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8897:
+ if (((card->txbd_wrptr & reg->tx_mask) !=
+ (card->txbd_rdptr & reg->tx_mask)) ||
+ ((card->txbd_wrptr & reg->tx_rollover_ind) ==
+ (card->txbd_rdptr & reg->tx_rollover_ind)))
+ return 1;
+ break;
+ }
return 0;
}
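
The pcie.c hunks above dereference card->pcie.reg, card->pcie.firmware and card->pcie.blksz_fw_dl, but the probe-time wiring is not part of the hunks shown here. Below is a hedged sketch of how the per-chip mwifiex_pcie_device table is presumably selected through the PCI id table's driver_data; the table name mwifiex_ids and the function name mwifiex_pcie_probe_sketch are placeholders for illustration, not identifiers confirmed by this patch.

/*
 * Sketch only: assumed probe-time selection of the per-chip table, to show
 * how card->pcie.reg presumably gets populated.
 */
#include <linux/pci.h>
#include <linux/slab.h>
#include "pcie.h"

static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
	{
		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		.driver_data = (unsigned long)&mwifiex_pcie8766,
	},
	{
		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		.driver_data = (unsigned long)&mwifiex_pcie8897,
	},
	{},
};

static int mwifiex_pcie_probe_sketch(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct pcie_service_card *card;

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->dev = pdev;
	if (ent->driver_data) {
		struct mwifiex_pcie_device *data = (void *)ent->driver_data;

		/* Everything else keys off this copy: card->pcie.reg for
		 * register offsets and masks, card->pcie.firmware for the
		 * firmware request, card->pcie.blksz_fw_dl for download
		 * block rounding.
		 */
		card->pcie = *data;
	}

	/* ... remainder of probe (add card, request irq, etc.) elided ... */
	return 0;
}
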
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index f0de40166dc..e0cce1b52d5 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1557,7 +1557,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
scan_rsp->number_of_sets);
ret = -1;
- goto done;
+ goto check_next_scan;
}
bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1628,7 +1628,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (!beacon_size || beacon_size > bytes_left) {
bss_info += bytes_left;
bytes_left = 0;
- return -1;
+ ret = -1;
+ goto check_next_scan;
}
/* Initialize the current working beacon pointer for this BSS
@@ -1684,7 +1685,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
dev_err(priv->adapter->dev,
"%s: bytes left < IE length\n",
__func__);
- goto done;
+ goto check_next_scan;
}
if (element_id == WLAN_EID_DS_PARAMS) {
channel = *(current_ptr + sizeof(struct ieee_types_header));
@@ -1740,13 +1741,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
.mac_address, ETH_ALEN))
mwifiex_update_curr_bss_params(priv,
bss);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->wdev->wiphy, bss);
}
} else {
dev_dbg(adapter->dev, "missing BSS channel IE\n");
}
}
+check_next_scan:
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (list_empty(&adapter->scan_pending_q)) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1807,7 +1809,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
}
}
-done:
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 31d7b2bdaa3..d3fb9a14580 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -332,7 +332,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
u8 *buffer, u32 pkt_len, u32 port)
{
struct sdio_mmc_card *card = adapter->card;
- int ret = -1;
+ int ret;
u8 blk_mode =
(port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
@@ -350,8 +350,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
sdio_claim_host(card->func);
- if (!sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size))
- ret = 0;
+ ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size);
sdio_release_host(card->func);
@@ -365,7 +364,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
u32 len, u32 port, u8 claim)
{
struct sdio_mmc_card *card = adapter->card;
- int ret = -1;
+ int ret;
u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE
: BLOCK_MODE;
u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
@@ -376,8 +375,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
if (claim)
sdio_claim_host(card->func);
- if (!sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size))
- ret = 0;
+ ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size);
if (claim)
sdio_release_host(card->func);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 65c12eb3e5e..847056415ac 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -935,9 +935,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
/ MWIFIEX_SDIO_BLOCK_SIZE)
* MWIFIEX_SDIO_BLOCK_SIZE;
adapter->curr_tx_buf_size = adapter->tx_buf_size;
- dev_dbg(adapter->dev,
- "cmd: max_tx_buf_size=%d, tx_buf_size=%d\n",
- adapter->max_tx_buf_size, adapter->tx_buf_size);
+ dev_dbg(adapter->dev, "cmd: curr_tx_buf_size=%d\n",
+ adapter->curr_tx_buf_size);
if (adapter->if_ops.update_mp_end_port)
adapter->if_ops.update_mp_end_port(adapter,
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index b8fa76a2b95..7eef74564a9 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -162,13 +162,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
rcu_read_lock();
ies = rcu_dereference(bss->ies);
- if (WARN_ON(!ies)) {
- /* should never happen */
- rcu_read_unlock();
- return -EINVAL;
- }
beacon_ie = kmemdup(ies->data, ies->len, GFP_ATOMIC);
beacon_ie_len = ies->len;
+ bss_desc->timestamp = ies->tsf;
rcu_read_unlock();
if (!beacon_ie) {
@@ -184,7 +180,6 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
bss_desc->cap_info_bitmap = bss->capability;
bss_desc->bss_band = bss_priv->band;
bss_desc->fw_tsf = bss_priv->fw_tsf;
- bss_desc->timestamp = bss->tsf;
if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
@@ -322,7 +317,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
}
if (bss)
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->adapter->wiphy, bss);
} else {
/* Adhoc mode */
/* If the requested SSID matches current SSID, return */
@@ -352,7 +347,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
" list. Joining...\n");
ret = mwifiex_adhoc_join(priv, bss_desc);
if (bss)
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->adapter->wiphy, bss);
} else {
dev_dbg(adapter->dev, "info: Network not found in "
"the list, creating adhoc with ssid = %s\n",
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 5d4a10a8a00..f90fe21e5bf 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -672,7 +672,7 @@ static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
*len, &actual_length, timeout);
if (ret) {
dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret);
- ret = -1;
+ return ret;
}
*len = actual_length;
@@ -691,7 +691,7 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
*len, &actual_length, timeout);
if (ret) {
dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret);
- ret = -1;
+ return ret;
}
*len = actual_length;
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 0982375ba3b..21553976b55 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -91,7 +91,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
memcpy(info->packets_out,
priv->wmm.packets_out,
sizeof(priv->wmm.packets_out));
- info->max_tx_buf_size = (u32) adapter->max_tx_buf_size;
+ info->curr_tx_buf_size = (u32) adapter->curr_tx_buf_size;
info->tx_buf_size = (u32) adapter->tx_buf_size;
info->rx_tbl_num = mwifiex_get_rx_reorder_tbl(priv,
info->rx_tbl);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 224cf917744..091d9a64080 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -285,6 +285,9 @@ struct mwl8k_priv {
char *fw_pref;
char *fw_alt;
struct completion firmware_loading_complete;
+
+ /* bitmap of running BSSes */
+ u32 running_bsses;
};
#define MAX_WEP_KEY_LEN 13
@@ -331,20 +334,20 @@ struct mwl8k_sta {
#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
static const struct ieee80211_channel mwl8k_channels_24[] = {
- { .center_freq = 2412, .hw_value = 1, },
- { .center_freq = 2417, .hw_value = 2, },
- { .center_freq = 2422, .hw_value = 3, },
- { .center_freq = 2427, .hw_value = 4, },
- { .center_freq = 2432, .hw_value = 5, },
- { .center_freq = 2437, .hw_value = 6, },
- { .center_freq = 2442, .hw_value = 7, },
- { .center_freq = 2447, .hw_value = 8, },
- { .center_freq = 2452, .hw_value = 9, },
- { .center_freq = 2457, .hw_value = 10, },
- { .center_freq = 2462, .hw_value = 11, },
- { .center_freq = 2467, .hw_value = 12, },
- { .center_freq = 2472, .hw_value = 13, },
- { .center_freq = 2484, .hw_value = 14, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
};
static const struct ieee80211_rate mwl8k_rates_24[] = {
@@ -365,10 +368,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {
};
static const struct ieee80211_channel mwl8k_channels_50[] = {
- { .center_freq = 5180, .hw_value = 36, },
- { .center_freq = 5200, .hw_value = 40, },
- { .center_freq = 5220, .hw_value = 44, },
- { .center_freq = 5240, .hw_value = 48, },
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
};
static const struct ieee80211_rate mwl8k_rates_50[] = {
@@ -1146,7 +1149,6 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
if (rxq->buf == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
return -ENOMEM;
}
@@ -1439,7 +1441,6 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
if (txq->skb == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
return -ENOMEM;
}
@@ -2156,6 +2157,8 @@ static void mwl8k_fw_unlock(struct ieee80211_hw *hw)
}
}
+static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable,
+ u32 bitmap);
/*
* Command processing.
@@ -2174,6 +2177,34 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
int rc;
unsigned long timeout = 0;
u8 buf[32];
+ u32 bitmap = 0;
+
+ wiphy_dbg(hw->wiphy, "Posting %s [%d]\n",
+ mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), cmd->macid);
+
+ /* Before posting firmware commands that could change the hardware
+ * characteristics, make sure that all BSSes are stopped temporarily.
+ * Enable these stopped BSSes after completion of the commands.
+ */
+
+ rc = mwl8k_fw_lock(hw);
+ if (rc)
+ return rc;
+
+ if (priv->ap_fw && priv->running_bsses) {
+ switch (le16_to_cpu(cmd->code)) {
+ case MWL8K_CMD_SET_RF_CHANNEL:
+ case MWL8K_CMD_RADIO_CONTROL:
+ case MWL8K_CMD_RF_TX_POWER:
+ case MWL8K_CMD_TX_POWER:
+ case MWL8K_CMD_RF_ANTENNA:
+ case MWL8K_CMD_RTS_THRESHOLD:
+ case MWL8K_CMD_MIMO_CONFIG:
+ bitmap = priv->running_bsses;
+ mwl8k_enable_bsses(hw, false, bitmap);
+ break;
+ }
+ }
cmd->result = (__force __le16) 0xffff;
dma_size = le16_to_cpu(cmd->length);
@@ -2182,13 +2213,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
if (pci_dma_mapping_error(priv->pdev, dma_addr))
return -ENOMEM;
- rc = mwl8k_fw_lock(hw);
- if (rc) {
- pci_unmap_single(priv->pdev, dma_addr, dma_size,
- PCI_DMA_BIDIRECTIONAL);
- return rc;
- }
-
priv->hostcmd_wait = &cmd_wait;
iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
iowrite32(MWL8K_H2A_INT_DOORBELL,
@@ -2201,7 +2225,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
priv->hostcmd_wait = NULL;
- mwl8k_fw_unlock(hw);
pci_unmap_single(priv->pdev, dma_addr, dma_size,
PCI_DMA_BIDIRECTIONAL);
@@ -2228,6 +2251,11 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
ms);
}
+ if (bitmap)
+ mwl8k_enable_bsses(hw, true, bitmap);
+
+ mwl8k_fw_unlock(hw);
+
return rc;
}
@@ -2489,7 +2517,7 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
priv->hw_rev = cmd->hw_rev;
mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
priv->ap_macids_supported = 0x000000ff;
- priv->sta_macids_supported = 0x00000000;
+ priv->sta_macids_supported = 0x00000100;
priv->num_ampdu_queues = le32_to_cpu(cmd->num_of_ampdu_queues);
if (priv->num_ampdu_queues > MWL8K_MAX_AMPDU_QUEUES) {
wiphy_warn(hw->wiphy, "fw reported %d ampdu queues"
@@ -3508,7 +3536,10 @@ static int mwl8k_cmd_update_mac_addr(struct ieee80211_hw *hw,
mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
- mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
+ if (priv->ap_fw)
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
+ else
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
else
mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
} else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
@@ -3680,8 +3711,16 @@ static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int enable)
{
struct mwl8k_cmd_bss_start *cmd;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct mwl8k_priv *priv = hw->priv;
int rc;
+ if (enable && (priv->running_bsses & (1 << mwl8k_vif->macid)))
+ return 0;
+
+ if (!enable && !(priv->running_bsses & (1 << mwl8k_vif->macid)))
+ return 0;
+
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
@@ -3693,9 +3732,31 @@ static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
+ if (!rc) {
+ if (enable)
+ priv->running_bsses |= (1 << mwl8k_vif->macid);
+ else
+ priv->running_bsses &= ~(1 << mwl8k_vif->macid);
+ }
return rc;
}
+static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable, u32 bitmap)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif, *tmp_vif;
+ struct ieee80211_vif *vif;
+
+ list_for_each_entry_safe(mwl8k_vif, tmp_vif, &priv->vif_list, list) {
+ vif = mwl8k_vif->vif;
+
+ if (!(bitmap & (1 << mwl8k_vif->macid)))
+ continue;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ mwl8k_cmd_bss_start(hw, vif, enable);
+ }
+}
/*
* CMD_BASTREAM.
*/
@@ -4202,8 +4263,9 @@ static int mwl8k_set_key(struct ieee80211_hw *hw,
u8 encr_type;
u8 *addr;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct mwl8k_priv *priv = hw->priv;
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION && !priv->ap_fw)
return -EOPNOTSUPP;
if (sta == NULL)
@@ -4609,12 +4671,18 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
if (priv->ap_fw && di->fw_image_sta) {
- /* we must load the sta fw to meet this request */
- if (!list_empty(&priv->vif_list))
- return -EBUSY;
- rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
- if (rc)
- return rc;
+ if (!list_empty(&priv->vif_list)) {
+ wiphy_warn(hw->wiphy, "AP interface is running.\n"
+ "Adding STA interface for WDS");
+ } else {
+ /* we must load the sta fw to
+ * meet this request.
+ */
+ rc = mwl8k_reload_firmware(hw,
+ di->fw_image_sta);
+ if (rc)
+ return rc;
+ }
}
macids_supported = priv->sta_macids_supported;
break;
@@ -4638,7 +4706,7 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
/* Set the mac address. */
mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
- if (priv->ap_fw)
+ if (vif->type == NL80211_IFTYPE_AP)
mwl8k_cmd_set_new_stn_add_self(hw, vif);
priv->macids_used |= 1 << mwl8k_vif->macid;
@@ -4663,7 +4731,7 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
- if (priv->ap_fw)
+ if (vif->type == NL80211_IFTYPE_AP)
mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
mwl8k_cmd_del_mac_addr(hw, vif, vif->addr);
@@ -4737,9 +4805,11 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
if (rc)
goto out;
- rc = mwl8k_cmd_set_rf_channel(hw, conf);
- if (rc)
- goto out;
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ rc = mwl8k_cmd_set_rf_channel(hw, conf);
+ if (rc)
+ goto out;
+ }
if (conf->power_level > 18)
conf->power_level = 18;
@@ -4752,12 +4822,6 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
goto out;
}
- rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
- if (rc)
- wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
- rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
- if (rc)
- wiphy_warn(hw->wiphy, "failed to set # of TX antennas");
} else {
rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
@@ -4815,7 +4879,8 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rcu_read_unlock();
}
- if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
+ if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
+ !priv->ap_fw) {
rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
if (rc)
goto out;
@@ -4823,6 +4888,25 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rc = mwl8k_cmd_use_fixed_rate_sta(hw);
if (rc)
goto out;
+ } else {
+ if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
+ priv->ap_fw) {
+ int idx;
+ int rate;
+
+ /* Use AP firmware specific rate command.
+ */
+ idx = ffs(vif->bss_conf.basic_rates);
+ if (idx)
+ idx--;
+
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rate = mwl8k_rates_24[idx].hw_value;
+ else
+ rate = mwl8k_rates_50[idx].hw_value;
+
+ mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ }
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -4832,13 +4916,13 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
- if (changed & BSS_CHANGED_ERP_SLOT) {
+ if ((changed & BSS_CHANGED_ERP_SLOT) && !priv->ap_fw) {
rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
if (rc)
goto out;
}
- if (vif->bss_conf.assoc &&
+ if (vif->bss_conf.assoc && !priv->ap_fw &&
(changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
BSS_CHANGED_HT))) {
rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
@@ -4918,11 +5002,9 @@ static void
mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
- struct mwl8k_priv *priv = hw->priv;
-
- if (!priv->ap_fw)
+ if (vif->type == NL80211_IFTYPE_STATION)
mwl8k_bss_info_changed_sta(hw, vif, info, changed);
- else
+ if (vif->type == NL80211_IFTYPE_AP)
mwl8k_bss_info_changed_ap(hw, vif, info, changed);
}
@@ -5389,6 +5471,8 @@ static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
+ { PCI_VDEVICE(MARVELL, 0x2a41), .driver_data = MWL8366, },
+ { PCI_VDEVICE(MARVELL, 0x2a42), .driver_data = MWL8366, },
{ PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
{ },
};
@@ -5647,6 +5731,15 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
goto err_free_irq;
}
+ /* Configure Antennas */
+ rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
+ if (rc)
+ wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
+ rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+ if (rc)
+ wiphy_warn(hw->wiphy, "failed to set # of TX antennas");
+
/* Disable interrupts */
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
@@ -5734,6 +5827,7 @@ fail:
static const struct ieee80211_iface_limit ap_if_limits[] = {
{ .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
};
static const struct ieee80211_iface_combination ap_if_comb = {
@@ -5826,6 +5920,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
if (priv->ap_macids_supported || priv->device_info->fw_image_ap) {
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
hw->wiphy->iface_combinations = &ap_if_comb;
hw->wiphy->n_iface_combinations = 1;
}
@@ -5948,6 +6043,8 @@ static int mwl8k_probe(struct pci_dev *pdev,
priv->hw_restart_in_progress = false;
+ priv->running_bsses = 0;
+
return rc;
err_stop_firmware:
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 96e39edfec7..e8c5714bfd1 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -125,7 +125,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
capability, beacon_interval, ie_buf, ie_len,
signal, GFP_KERNEL);
- cfg80211_put_bss(cbss);
+ cfg80211_put_bss(wiphy, cbss);
}
void orinoco_add_extscan_result(struct orinoco_private *priv,
@@ -158,7 +158,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
capability, beacon_interval, ie, ie_len,
signal, GFP_KERNEL);
- cfg80211_put_bss(cbss);
+ cfg80211_put_bss(wiphy, cbss);
}
void orinoco_add_hostscan_results(struct orinoco_private *priv,
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 62ac6073753..b9deef66cf4 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -84,8 +84,8 @@ static struct usb_device_id p54u_table[] = {
{USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
{USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
{USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
- {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */
{USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
+ {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
{USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
{USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
{USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 9bb3f22b366..525fd7521df 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2026,7 +2026,7 @@ static bool rndis_bss_info_update(struct usbnet *usbdev,
bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
timestamp, capability, beacon_interval, ie, ie_len, signal,
GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->wdev.wiphy, bss);
return (bss != NULL);
}
@@ -2715,7 +2715,7 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
timestamp, capability, beacon_period, ie_buf, ie_len,
signal, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(priv->wdev.wiphy, bss);
}
/*
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index a2d2bc2c7b3..221beaaa83f 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1185,8 +1185,14 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- rt2x00queue_map_txskb(entry);
-
+ if (rt2x00queue_map_txskb(entry)) {
+ ERROR(rt2x00dev, "Failed to map beacon, aborting\n");
+ goto out;
+ }
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
/*
* Write the TX descriptor for the beacon.
*/
@@ -1196,7 +1202,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
-
+out:
/*
* Enable beaconing again.
*/
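
The rt2400pci hunks above make the beacon path abort when the DMA mapping fails, while still falling through to the code that re-enables beacon generation. A condensed sketch of that control flow, using the register and field names from the driver; the body below is a hypothetical stand-in, not the full callback:

#include "rt2x00.h"
#include "rt2x00pci.h"

/* Sketch: beacon generation is switched off around the DMA mapping,
 * and a failed mapping jumps straight to the code that restores it. */
static void demo_write_beacon(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	u32 reg;

	rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
	rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
	rt2x00pci_register_write(rt2x00dev, CSR14, reg);

	if (rt2x00queue_map_txskb(entry))
		goto out;	/* mapping failed; just restore beaconing */

	/* ... write the TX descriptor and dump the frame here ... */
out:
	rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
	rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}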
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 9bea10f53f0..39edc59e8d0 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1338,7 +1338,10 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- rt2x00queue_map_txskb(entry);
+ if (rt2x00queue_map_txskb(entry)) {
+ ERROR(rt2x00dev, "Failed to map beacon, aborting\n");
+ goto out;
+ }
/*
* Write the TX descriptor for the beacon.
@@ -1349,7 +1352,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
-
+out:
/*
* Enable beaconing again.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index a5c694f23d3..a658b4bc7da 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -80,7 +80,7 @@ static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF3022))
return true;
- NOTICE(rt2x00dev, "Unknown RF chipset on rt305x\n");
+ WARNING(rt2x00dev, "Unknown RF chipset on rt305x\n");
return false;
}
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 0e8d1705e36..48a01aa21f1 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -1152,6 +1152,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3562) },
{ PCI_DEVICE(0x1814, 0x3592) },
{ PCI_DEVICE(0x1814, 0x3593) },
+ { PCI_DEVICE(0x1814, 0x359f) },
#endif
#ifdef CONFIG_RT2800PCI_RT53XX
{ PCI_DEVICE(0x1814, 0x5360) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 4721cada159..098613ed93f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -540,9 +540,9 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) {
- WARNING(entry->queue->rt2x00dev,
- "TX status report missed for queue %d entry %d\n",
- entry->queue->qid, entry->entry_idx);
+ DEBUG(entry->queue->rt2x00dev,
+ "TX status report missed for queue %d entry %d\n",
+ entry->queue->qid, entry->entry_idx);
return TXDONE_UNKNOWN;
}
@@ -968,6 +968,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c13) },
{ USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c16) },
+ { USB_DEVICE(0x07d1, 0x3c17) },
{ USB_DEVICE(0x2001, 0x3c1b) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
@@ -1098,9 +1099,11 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x15a9, 0x0006) },
/* Sweex */
{ USB_DEVICE(0x177f, 0x0153) },
+ { USB_DEVICE(0x177f, 0x0164) },
{ USB_DEVICE(0x177f, 0x0302) },
{ USB_DEVICE(0x177f, 0x0313) },
{ USB_DEVICE(0x177f, 0x0323) },
+ { USB_DEVICE(0x177f, 0x0324) },
/* U-Media */
{ USB_DEVICE(0x157e, 0x300e) },
{ USB_DEVICE(0x157e, 0x3013) },
@@ -1115,6 +1118,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Zyxel */
{ USB_DEVICE(0x0586, 0x3416) },
{ USB_DEVICE(0x0586, 0x3418) },
+ { USB_DEVICE(0x0586, 0x341a) },
{ USB_DEVICE(0x0586, 0x341e) },
{ USB_DEVICE(0x0586, 0x343e) },
#ifdef CONFIG_RT2800USB_RT33XX
@@ -1131,6 +1135,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x8070) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0050) },
+ /* Sweex */
+ { USB_DEVICE(0x177f, 0x0163) },
+ { USB_DEVICE(0x177f, 0x0165) },
#endif
#ifdef CONFIG_RT2800USB_RT35XX
/* Allwin */
@@ -1166,6 +1173,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
#ifdef CONFIG_RT2800USB_RT53XX
/* Arcadyan */
{ USB_DEVICE(0x043e, 0x7a12) },
+ { USB_DEVICE(0x043e, 0x7a32) },
/* Azurewave */
{ USB_DEVICE(0x13d3, 0x3329) },
{ USB_DEVICE(0x13d3, 0x3365) },
@@ -1177,16 +1185,20 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x2001, 0x3c1e) },
/* LG innotek */
{ USB_DEVICE(0x043e, 0x7a22) },
+ { USB_DEVICE(0x043e, 0x7a42) },
/* Panasonic */
{ USB_DEVICE(0x04da, 0x1801) },
{ USB_DEVICE(0x04da, 0x1800) },
+ { USB_DEVICE(0x04da, 0x23f6) },
/* Philips */
{ USB_DEVICE(0x0471, 0x2104) },
+ { USB_DEVICE(0x0471, 0x2126) },
+ { USB_DEVICE(0x0471, 0x2180) },
+ { USB_DEVICE(0x0471, 0x2181) },
+ { USB_DEVICE(0x0471, 0x2182) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x5370) },
{ USB_DEVICE(0x148f, 0x5372) },
- /* Unknown */
- { USB_DEVICE(0x04da, 0x23f6) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
@@ -1207,10 +1219,15 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0b05, 0x1760) },
{ USB_DEVICE(0x0b05, 0x1761) },
{ USB_DEVICE(0x0b05, 0x1790) },
+ { USB_DEVICE(0x0b05, 0x17a7) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3262) },
{ USB_DEVICE(0x13d3, 0x3284) },
{ USB_DEVICE(0x13d3, 0x3322) },
+ { USB_DEVICE(0x13d3, 0x3340) },
+ { USB_DEVICE(0x13d3, 0x3399) },
+ { USB_DEVICE(0x13d3, 0x3400) },
+ { USB_DEVICE(0x13d3, 0x3401) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x1003) },
/* Buffalo */
@@ -1223,13 +1240,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x18c5, 0x0008) },
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c0b) },
- { USB_DEVICE(0x07d1, 0x3c17) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1) },
+ /* EnGenius */
+ { USB_DEVICE(0x1740, 0x0600) },
+ { USB_DEVICE(0x1740, 0x0602) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0010) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800c) },
+ /* Hercules */
+ { USB_DEVICE(0x06f8, 0xe036) },
/* Huawei */
{ USB_DEVICE(0x148f, 0xf101) },
/* I-O DATA */
@@ -1256,13 +1277,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0df6, 0x004a) },
{ USB_DEVICE(0x0df6, 0x004d) },
{ USB_DEVICE(0x0df6, 0x0053) },
+ { USB_DEVICE(0x0df6, 0x0069) },
+ { USB_DEVICE(0x0df6, 0x006f) },
/* SMC */
{ USB_DEVICE(0x083a, 0xa512) },
{ USB_DEVICE(0x083a, 0xc522) },
{ USB_DEVICE(0x083a, 0xd522) },
{ USB_DEVICE(0x083a, 0xf511) },
- /* Zyxel */
- { USB_DEVICE(0x0586, 0x341a) },
+ /* Sweex */
+ { USB_DEVICE(0x177f, 0x0254) },
+ /* TP-LINK */
+ { USB_DEVICE(0xf201, 0x5370) },
#endif
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index b52512b8ac5..086abb403a4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -88,11 +88,9 @@
#define ERROR_PROBE(__msg, __args...) \
DEBUG_PRINTK_PROBE(KERN_ERR, "Error", __msg, ##__args)
#define WARNING(__dev, __msg, __args...) \
- DEBUG_PRINTK(__dev, KERN_WARNING, "Warning", __msg, ##__args)
-#define NOTICE(__dev, __msg, __args...) \
- DEBUG_PRINTK(__dev, KERN_NOTICE, "Notice", __msg, ##__args)
+ DEBUG_PRINTK_MSG(__dev, KERN_WARNING, "Warning", __msg, ##__args)
#define INFO(__dev, __msg, __args...) \
- DEBUG_PRINTK(__dev, KERN_INFO, "Info", __msg, ##__args)
+ DEBUG_PRINTK_MSG(__dev, KERN_INFO, "Info", __msg, ##__args)
#define DEBUG(__dev, __msg, __args...) \
DEBUG_PRINTK(__dev, KERN_DEBUG, "Debug", __msg, ##__args)
#define EEPROM(__dev, __msg, __args...) \
@@ -1171,8 +1169,10 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
/**
* rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
* @entry: Pointer to &struct queue_entry
+ *
+ * Returns -ENOMEM if mapping fails, 0 otherwise.
*/
-void rt2x00queue_map_txskb(struct queue_entry *entry);
+int rt2x00queue_map_txskb(struct queue_entry *entry);
/**
* rt2x00queue_unmap_skb - Unmap a skb from DMA.
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index b40a5385749..1031db66474 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1236,7 +1236,8 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
*/
if_limit = &rt2x00dev->if_limits_ap;
if_limit->max = rt2x00dev->ops->max_ap_intf;
- if_limit->types = BIT(NL80211_IFTYPE_AP);
+ if_limit->types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
/*
* Build up AP interface combinations structure.
@@ -1446,7 +1447,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
#ifdef CONFIG_PM
int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
{
- NOTICE(rt2x00dev, "Going to sleep.\n");
+ DEBUG(rt2x00dev, "Going to sleep.\n");
/*
* Prevent mac80211 from accessing driver while suspended.
@@ -1486,7 +1487,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_suspend);
int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
{
- NOTICE(rt2x00dev, "Waking up.\n");
+ DEBUG(rt2x00dev, "Waking up.\n");
/*
* Restore/enable extra components.
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index ed7a1bb3f24..20c6eccce5a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -731,9 +731,9 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
queue->aifs = params->aifs;
queue->txop = params->txop;
- INFO(rt2x00dev,
- "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n",
- queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop);
+ DEBUG(rt2x00dev,
+ "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n",
+ queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop);
return 0;
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index f35d85a71bb..4d91795dc6a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -87,24 +87,35 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
skbdesc->entry = entry;
if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
- skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
- skb->data,
- skb->len,
- DMA_FROM_DEVICE);
+ dma_addr_t skb_dma;
+
+ skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ skbdesc->skb_dma = skb_dma;
skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
}
return skb;
}
-void rt2x00queue_map_txskb(struct queue_entry *entry)
+int rt2x00queue_map_txskb(struct queue_entry *entry)
{
struct device *dev = entry->queue->rt2x00dev->dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skbdesc->skb_dma =
dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
+ return -ENOMEM;
+
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
+ return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
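
rt2x00queue_map_txskb() now reports mapping failures instead of handing an invalid DMA handle to the hardware. The underlying pattern, shown as a standalone sketch with a generic device pointer rather than rt2x00's structures:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Sketch: map an skb for transmission and check the result.
 * dma_map_single() can fail; the returned handle must be checked
 * with dma_mapping_error() before it is given to the hardware. */
static int demo_map_txskb(struct device *dev, struct sk_buff *skb,
			  dma_addr_t *dma)
{
	*dma = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, *dma)))
		return -ENOMEM;

	return 0;
}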
@@ -343,10 +354,7 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
* when using more then one tx stream (>MCS7).
*/
if (sta && txdesc->u.ht.mcs > 7 &&
- ((sta->ht_cap.cap &
- IEEE80211_HT_CAP_SM_PS) >>
- IEEE80211_HT_CAP_SM_PS_SHIFT) ==
- WLAN_HT_CAP_SM_PS_DYNAMIC)
+ sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
} else {
txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
@@ -545,8 +553,9 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
/*
* Map the skb to DMA.
*/
- if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
- rt2x00queue_map_txskb(entry);
+ if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
+ rt2x00queue_map_txskb(entry))
+ return -ENOMEM;
return 0;
}
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index b80bc461258..b6aa0c40658 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,8 +1,26 @@
+config RTLWIFI
+ tristate "Realtek wireless card support"
+ depends on MAC80211
+ select FW_LOADER
+ ---help---
+ This is common code for RTL8192CE/RTL8192CU/RTL8192SE/RTL8723AE
+ drivers. This module does nothing by itself - the various front-end
+ drivers need to be enabled to support any desired devices.
+
+ If you choose to build as a module, it'll be called rtlwifi.
+
+config RTLWIFI_DEBUG
+ bool "Debugging output for rtlwifi driver family"
+ depends on RTLWIFI
+ default y
+ ---help---
+ To use the module option that sets the dynamic-debugging level for
+ the front-end driver, this parameter must be "Y". For memory-limited
+ systems, choose "N". If in doubt, choose "Y".
+
config RTL8192CE
tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
- depends on MAC80211 && PCI
- select FW_LOADER
- select RTLWIFI
+ depends on RTLWIFI && PCI
select RTL8192C_COMMON
---help---
This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
@@ -12,9 +30,7 @@ config RTL8192CE
config RTL8192SE
tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
- depends on MAC80211 && PCI
- select FW_LOADER
- select RTLWIFI
+ depends on RTLWIFI && PCI
---help---
This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe
wireless network adapters.
@@ -23,9 +39,7 @@ config RTL8192SE
config RTL8192DE
tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
- depends on MAC80211 && PCI
- select FW_LOADER
- select RTLWIFI
+ depends on RTLWIFI && PCI
---help---
This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe
wireless network adapters.
@@ -34,9 +48,7 @@ config RTL8192DE
config RTL8723AE
tristate "Realtek RTL8723AE PCIe Wireless Network Adapter"
- depends on MAC80211 && PCI && EXPERIMENTAL
- select FW_LOADER
- select RTLWIFI
+ depends on RTLWIFI && PCI
---help---
This is the driver for Realtek RTL8723AE 802.11n PCIe
wireless network adapters.
@@ -45,9 +57,7 @@ config RTL8723AE
config RTL8192CU
tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
- depends on MAC80211 && USB
- select FW_LOADER
- select RTLWIFI
+ depends on RTLWIFI && USB
select RTL8192C_COMMON
---help---
This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
@@ -55,16 +65,6 @@ config RTL8192CU
If you choose to build it as a module, it will be called rtl8192cu
-config RTLWIFI
- tristate
- depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE
- default m
-
-config RTLWIFI_DEBUG
- bool "Additional debugging output"
- depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE
- default y
-
config RTL8192C_COMMON
tristate
depends on RTL8192CE || RTL8192CU
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 4494d130b37..99c5cea3fe2 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -523,8 +523,8 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
if (mac->opmode == NL80211_IFTYPE_STATION)
bw_40 = mac->bw_40;
else if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC)
- bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
if (bw_40 && sgi_40)
tcb_desc->use_shortgi = true;
@@ -634,8 +634,7 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
return;
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
- if (!(sta->ht_cap.ht_supported) ||
- !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
return;
} else if (mac->opmode == NL80211_IFTYPE_STATION) {
if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
@@ -1004,7 +1003,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
is_tx ? "Tx" : "Rx");
if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->
+ works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies =
jiffies;
}
@@ -1014,7 +1014,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
} else if (ETH_P_ARP == ether_type) {
if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
@@ -1024,7 +1024,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
"802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
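
The substitutions in this and the following rtlwifi hunks replace raw IEEE80211_HT_CAP_SUP_WIDTH_20_40 checks with mac80211's per-station bandwidth field, which is kept up to date with what has actually been negotiated for the peer. A short hedged sketch of the check:

#include <net/mac80211.h>

/* Sketch: gate 40 MHz transmission on the bandwidth mac80211 has
 * negotiated for the peer, rather than on the raw HT capability bit. */
static bool demo_peer_allows_40mhz(struct ieee80211_sta *sta)
{
	return sta && sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}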
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index c1e065f136b..f9f059dadb7 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -116,9 +116,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (txrc->short_preamble)
rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC) {
- if (sta && (sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (sta && (sta->bandwidth >= IEEE80211_STA_RX_BW_40))
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
} else {
if (mac->bw_40)
@@ -223,13 +222,6 @@ static void rtl_rate_init(void *ppriv,
{
}
-static void rtl_rate_update(void *ppriv,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- u32 changed)
-{
-}
-
static void *rtl_rate_alloc(struct ieee80211_hw *hw,
struct dentry *debugfsdir)
{
@@ -275,7 +267,6 @@ static struct rate_control_ops rtl_rate_ops = {
.alloc_sta = rtl_rate_alloc_sta,
.free_sta = rtl_rate_free_sta,
.rate_init = rtl_rate_init,
- .rate_update = rtl_rate_update,
.tx_status = rtl_tx_status,
.get_rate = rtl_get_rate,
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 1cdf5a271c9..b793a659a46 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -669,7 +669,8 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
u8 thermalvalue, delta, delta_lck, delta_iqk;
long ele_a, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
- u8 ofdm_index[2], cck_index = 0, ofdm_index_old[2], cck_index_old = 0;
+ u8 ofdm_index[2], ofdm_index_old[2], cck_index_old = 0;
+ s8 cck_index = 0;
int i;
bool is2t = IS_92C_SERIAL(rtlhal->version);
s8 txpwr_level[2] = {0, 0};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index d1f34f6ffbd..1b65db7fd65 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -1846,9 +1846,9 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- ? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+ u8 curshortgi_40mhz = curtxbw_40mhz &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index c31795e379f..b9b1a6e0b16 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -488,7 +488,7 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
u8 *praddr;
__le16 fc;
u16 type, c_fc;
- bool packet_matchbssid, packet_toself, packet_beacon;
+ bool packet_matchbssid, packet_toself, packet_beacon = false;
tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
@@ -626,8 +626,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 32ff959a025..85b6bdb163c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -1084,7 +1084,7 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
u8 *praddr;
__le16 fc;
u16 type, cpu_fc;
- bool packet_matchbssid, packet_toself, packet_beacon;
+ bool packet_matchbssid, packet_toself, packet_beacon = false;
tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
hdr = (struct ieee80211_hdr *)tmp_buf;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index b7e6607e6b6..a73a17bc56d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -76,7 +76,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
GFP_KERNEL, hw, rtl_fw_cb);
- return 0;
+ return err;
}
static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
@@ -285,6 +285,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
/* RTL8188CUS-VL */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
@@ -363,9 +364,15 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
+static int rtl8192cu_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return rtl_usb_probe(intf, id, &rtl92cu_hal_cfg);
+}
+
static struct usb_driver rtl8192cu_driver = {
.name = "rtl8192cu",
- .probe = rtl_usb_probe,
+ .probe = rtl8192cu_probe,
.disconnect = rtl_usb_disconnect,
.id_table = rtl8192c_usb_ids,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index fd8df233ff2..5251fb8a111 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -841,9 +841,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
long ele_a = 0, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
u8 ofdm_index[2];
- u8 cck_index = 0;
+ s8 cck_index = 0;
u8 ofdm_index_old[2];
- u8 cck_index_old = 0;
+ s8 cck_index_old = 0;
u8 index;
int i;
bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index f4051f4f039..aa5b42521bb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1970,8 +1970,7 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- ? 1 : 0;
+ u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index cdb570ffb4b..941080e03c0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -574,8 +574,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 28526a7361f..084e7773bce 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -2085,8 +2085,7 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index = 0;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- ? 1 : 0;
+ u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index f8431a3c2c9..7b0a2e75b8b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -621,8 +621,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
index f55b1767ef5..35cb8f83eed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
@@ -252,7 +252,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
u16 box_reg = 0, box_extreg = 0;
u8 u1tmp;
bool isfw_rd = false;
- bool bwrite_sucess = false;
+ bool bwrite_success = false;
u8 wait_h2c_limmit = 100;
u8 wait_writeh2c_limmit = 100;
u8 boxcontent[4], boxextcontent[2];
@@ -291,7 +291,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
}
}
- while (!bwrite_sucess) {
+ while (!bwrite_success) {
wait_writeh2c_limmit--;
if (wait_writeh2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -429,7 +429,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
break;
}
- bwrite_sucess = true;
+ bwrite_success = true;
rtlhal->last_hmeboxnum = boxnum + 1;
if (rtlhal->last_hmeboxnum == 4)
@@ -512,7 +512,6 @@ static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
- u8 own;
unsigned long flags;
struct sk_buff *pskb = NULL;
@@ -525,7 +524,6 @@ static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pdesc = &ring->desc[0];
- own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
index 887d521fe69..68c28340f79 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
@@ -1433,7 +1433,6 @@ static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
- u8 bt_retry_cnt;
u8 bt_info_original;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex] Get bt info by fw!!\n");
@@ -1445,7 +1444,6 @@ static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
"[BTCoex] c2h for btInfo not rcvd yet!!\n");
}
- bt_retry_cnt = rtlhal->hal_coex_8723.bt_retry_cnt;
bt_info_original = rtlhal->hal_coex_8723.c2h_bt_info_original;
/* when bt inquiry or page scan, we have to set h2c 0x25
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index 0a8c03863fb..9a0c71c2e15 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -703,11 +703,9 @@ static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 reg_bw_opmode;
- u32 reg_ratr, reg_prsr;
+ u32 reg_prsr;
reg_bw_opmode = BW_OPMODE_20MHZ;
- reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
- RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
@@ -1868,8 +1866,7 @@ static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- ? 1 : 0;
+ u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
@@ -2030,7 +2027,7 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ enum rf_pwrstate e_rfpowerstate_toset;
u8 u1tmp;
bool actuallyset = false;
@@ -2049,8 +2046,6 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
- cur_rfstate = ppsc->rfpwr_state;
-
rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2)&~(BIT(1)));
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index 3d8536bb0d2..eafbb18dd48 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -614,17 +614,11 @@ bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int i;
- bool rtstatus = true;
u32 *radioa_array_table;
- u32 *radiob_array_table;
- u16 radioa_arraylen, radiob_arraylen;
+ u16 radioa_arraylen;
radioa_arraylen = Rtl8723ERADIOA_1TARRAYLENGTH;
radioa_array_table = RTL8723E_RADIOA_1TARRAY;
- radiob_arraylen = RTL8723E_RADIOB_1TARRAYLENGTH;
- radiob_array_table = RTL8723E_RADIOB_1TARRAY;
-
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
@@ -1531,11 +1525,8 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
0x522, 0x550, 0x551, 0x040
};
const u32 retrycount = 2;
- u32 bbvalue;
if (t == 0) {
- bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
-
phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
}
@@ -1712,8 +1703,7 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
long result[4][8];
u8 i, final_candidate;
bool patha_ok, pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_tmp = 0;
bool is12simular, is13simular, is23simular;
bool start_conttx = false, singletone = false;
u32 iqk_bb_reg[10] = {
@@ -1780,21 +1770,15 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
- reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
patha_ok = pathb_ok = true;
} else {
rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index ce8ad12bce5..ac081297db5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -244,7 +244,6 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr;
u8 *tmp_buf;
u8 *praddr;
- u8 *psaddr;
__le16 fc;
u16 type;
bool packet_matchbssid, packet_toself, packet_beacon = false;
@@ -255,7 +254,6 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
fc = hdr->frame_control;
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
- psaddr = ieee80211_get_SA(hdr);
packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
(!compare_ether_addr(mac->bssid,
@@ -397,8 +395,7 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index f2ecdeb3a90..476eaef5e4a 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
WARN_ON(skb_queue_empty(&rx_queue));
while (!skb_queue_empty(&rx_queue)) {
_skb = skb_dequeue(&rx_queue);
- _rtl_usb_rx_process_agg(hw, skb);
- ieee80211_rx_irqsafe(hw, skb);
+ _rtl_usb_rx_process_agg(hw, _skb);
+ ieee80211_rx_irqsafe(hw, _skb);
}
}
@@ -825,8 +825,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
u32 ep_num;
struct urb *_urb = NULL;
struct sk_buff *_skb = NULL;
- struct sk_buff_head *skb_list;
- struct usb_anchor *urb_list;
WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
if (unlikely(IS_USB_STOP(rtlusb))) {
@@ -836,7 +834,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
return;
}
ep_num = rtlusb->ep_map.ep_mapping[qnum];
- skb_list = &rtlusb->tx_skb_queue[ep_num];
_skb = skb;
_urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
if (unlikely(!_urb)) {
@@ -844,7 +841,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
"Can't allocate urb. Drop skb!\n");
return;
}
- urb_list = &rtlusb->tx_pending[ep_num];
_rtl_submit_tx_urb(hw, _urb);
}
@@ -941,7 +937,8 @@ static struct rtl_intf_ops rtl_usb_ops = {
};
int rtl_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+ const struct usb_device_id *id,
+ struct rtl_hal_cfg *rtl_hal_cfg)
{
int err;
struct ieee80211_hw *hw = NULL;
@@ -976,7 +973,7 @@ int rtl_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, hw);
/* init cfg & intf_ops */
rtlpriv->rtlhal.interface = INTF_USB;
- rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
+ rtlpriv->cfg = rtl_hal_cfg;
rtlpriv->intf_ops = &rtl_usb_ops;
rtl_dbgp_flag_init(hw);
/* Init IO handler */
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
index 5235136f6dd..fb986f98d1d 100644
--- a/drivers/net/wireless/rtlwifi/usb.h
+++ b/drivers/net/wireless/rtlwifi/usb.h
@@ -157,7 +157,8 @@ struct rtl_usb_priv {
int rtl_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id);
+ const struct usb_device_id *id,
+ struct rtl_hal_cfg *rtl92cu_hal_cfg);
void rtl_usb_disconnect(struct usb_interface *intf);
int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
int rtl_usb_resume(struct usb_interface *pusb_intf);
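
rtl_usb_probe() now receives the HAL configuration explicitly instead of reading it from id->driver_info, so each front-end supplies a small wrapper like rtl8192cu_probe() above. A hypothetical second front-end would do the same; the names below are illustrative only:

#include "wifi.h"
#include "usb.h"

static struct rtl_hal_cfg demo_hal_cfg;	/* filled in by the front-end */

/* Sketch: per-driver probe wrapper passing its own HAL config. */
static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	return rtl_usb_probe(intf, id, &demo_hal_cfg);
}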
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 21a5f4f4a13..f13258a8d99 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1702,7 +1702,7 @@ struct rtl_works {
struct rtl_debug {
u32 dbgp_type[DBGP_TYPE_MAX];
- u32 global_debuglevel;
+ int global_debuglevel;
u64 global_debugcomponents;
/* add for proc debug */
diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig
index be800119d0a..cbe1e7fef61 100644
--- a/drivers/net/wireless/ti/Kconfig
+++ b/drivers/net/wireless/ti/Kconfig
@@ -12,4 +12,13 @@ source "drivers/net/wireless/ti/wl18xx/Kconfig"
# keep last for automatic dependencies
source "drivers/net/wireless/ti/wlcore/Kconfig"
+
+config WILINK_PLATFORM_DATA
+ bool "TI WiLink platform data"
+ depends on WLCORE_SDIO || WL1251_SDIO
+ default y
+ ---help---
+ Small platform data bit needed to pass data to the sdio modules.
+
+
endif # WL_TI
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
index 4d6823983c0..af14231aeed 100644
--- a/drivers/net/wireless/ti/Makefile
+++ b/drivers/net/wireless/ti/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_WLCORE) += wlcore/
obj-$(CONFIG_WL12XX) += wl12xx/
-obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/
obj-$(CONFIG_WL1251) += wl1251/
obj-$(CONFIG_WL18XX) += wl18xx/
+
+# small builtin driver bit
+obj-$(CONFIG_WILINK_PLATFORM_DATA) += wilink_platform_data.o
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c b/drivers/net/wireless/ti/wilink_platform_data.c
index 998e95895f9..998e95895f9 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c
+++ b/drivers/net/wireless/ti/wilink_platform_data.c
diff --git a/drivers/net/wireless/ti/wl1251/Kconfig b/drivers/net/wireless/ti/wl1251/Kconfig
index 1fb65849414..8fec4ed36ac 100644
--- a/drivers/net/wireless/ti/wl1251/Kconfig
+++ b/drivers/net/wireless/ti/wl1251/Kconfig
@@ -1,6 +1,6 @@
menuconfig WL1251
tristate "TI wl1251 driver support"
- depends on MAC80211 && EXPERIMENTAL && GENERIC_HARDIRQS
+ depends on MAC80211 && GENERIC_HARDIRQS
select FW_LOADER
select CRC7
---help---
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 5ec50a476a6..74ae8e1c2e3 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -29,6 +29,8 @@
static int wl1251_event_scan_complete(struct wl1251 *wl,
struct event_mailbox *mbox)
{
+ int ret = 0;
+
wl1251_debug(DEBUG_EVENT, "status: 0x%x, channels: %d",
mbox->scheduled_scan_status,
mbox->scheduled_scan_channels);
@@ -37,9 +39,11 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
ieee80211_scan_completed(wl->hw, false);
wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed");
wl->scanning = false;
+ if (wl->hw->conf.flags & IEEE80211_CONF_IDLE)
+ ret = wl1251_ps_set_mode(wl, STATION_IDLE);
}
- return 0;
+ return ret;
}
static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index f47e8b0482a..bbbf68cf50a 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -623,7 +623,7 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (changed & IEEE80211_CONF_CHANGE_IDLE && !wl->scanning) {
if (conf->flags & IEEE80211_CONF_IDLE) {
ret = wl1251_ps_set_mode(wl, STATION_IDLE);
if (ret < 0)
@@ -895,11 +895,21 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if (hw->conf.flags & IEEE80211_CONF_IDLE) {
+ ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
+ if (ret < 0)
+ goto out_sleep;
+ ret = wl1251_join(wl, wl->bss_type, wl->channel,
+ wl->beacon_int, wl->dtim_period);
+ if (ret < 0)
+ goto out_sleep;
+ }
+
skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
req->ie_len);
if (!skb) {
ret = -ENOMEM;
- goto out;
+ goto out_idle;
}
if (req->ie_len)
memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
@@ -908,11 +918,11 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
skb->len);
dev_kfree_skb(skb);
if (ret < 0)
- goto out_sleep;
+ goto out_idle;
ret = wl1251_cmd_trigger_scan_to(wl, 0);
if (ret < 0)
- goto out_sleep;
+ goto out_idle;
wl->scanning = true;
@@ -920,9 +930,13 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
req->n_channels, WL1251_SCAN_NUM_PROBES);
if (ret < 0) {
wl->scanning = false;
- goto out_sleep;
+ goto out_idle;
}
+ goto out_sleep;
+out_idle:
+ if (hw->conf.flags & IEEE80211_CONF_IDLE)
+ ret = wl1251_ps_set_mode(wl, STATION_IDLE);
out_sleep:
wl1251_ps_elp_sleep(wl);
diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile
index da509aa7d00..e6a24056b3c 100644
--- a/drivers/net/wireless/ti/wl12xx/Makefile
+++ b/drivers/net/wireless/ti/wl12xx/Makefile
@@ -1,3 +1,3 @@
-wl12xx-objs = main.o cmd.o acx.o debugfs.o
+wl12xx-objs = main.o cmd.o acx.o debugfs.o scan.o event.o
obj-$(CONFIG_WL12XX) += wl12xx.o
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c
index 622206241e8..7dc9f965037 100644
--- a/drivers/net/wireless/ti/wl12xx/cmd.c
+++ b/drivers/net/wireless/ti/wl12xx/cmd.c
@@ -284,3 +284,40 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
kfree(radio_parms);
return ret;
}
+
+int wl12xx_cmd_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct wl12xx_cmd_channel_switch *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "cmd channel switch");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->role_id = wlvif->role_id;
+ cmd->channel = ch_switch->channel->hw_value;
+ cmd->switch_time = ch_switch->count;
+ cmd->stop_tx = ch_switch->block_tx;
+
+ /* FIXME: control from mac80211 in the future */
+ /* Enable TX on the target channel */
+ cmd->post_switch_tx_disable = 0;
+
+ ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send channel switch command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
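
wl12xx_cmd_channel_switch() follows the driver's usual command pattern: allocate the command on the heap, fill it from the mac80211 request, send it, and free it on both paths. As a rough illustration of how a channel-switch handler might call it (the wrapper below is hypothetical; the driver actually wires the command up through its own ops):

#include "cmd.h"
#include "../wlcore/debug.h"

/* Sketch: forward a mac80211 channel-switch request to the firmware.
 * demo_op_channel_switch() is a hypothetical caller, not wl12xx code. */
static void demo_op_channel_switch(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_channel_switch *ch_switch)
{
	int ret;

	ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
	if (ret < 0)
		wl1271_error("channel switch failed: %d", ret);
}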
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.h b/drivers/net/wireless/ti/wl12xx/cmd.h
index 140a0e8829d..32cbad54e99 100644
--- a/drivers/net/wireless/ti/wl12xx/cmd.h
+++ b/drivers/net/wireless/ti/wl12xx/cmd.h
@@ -103,10 +103,30 @@ struct wl1271_ext_radio_parms_cmd {
u8 padding[3];
} __packed;
+struct wl12xx_cmd_channel_switch {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+
+ /* The new serving channel */
+ u8 channel;
+ /* Relative time of the serving channel switch in TBTT units */
+ u8 switch_time;
+ /* Stop the role TX, should expect it after radar detection */
+ u8 stop_tx;
+ /* The target channel tx status: 1 = stopped, 0 = open */
+ u8 post_switch_tx_disable;
+
+ u8 padding[3];
+} __packed;
+
int wl1271_cmd_general_parms(struct wl1271 *wl);
int wl128x_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
int wl128x_cmd_radio_parms(struct wl1271 *wl);
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
+int wl12xx_cmd_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_channel_switch *ch_switch);
#endif /* __WL12XX_CMD_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/event.c b/drivers/net/wireless/ti/wl12xx/event.c
new file mode 100644
index 00000000000..6ac0ed751da
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/event.c
@@ -0,0 +1,116 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "event.h"
+#include "scan.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+
+int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+ bool *timeout)
+{
+ u32 local_event;
+
+ switch (event) {
+ case WLCORE_EVENT_ROLE_STOP_COMPLETE:
+ local_event = ROLE_STOP_COMPLETE_EVENT_ID;
+ break;
+
+ case WLCORE_EVENT_PEER_REMOVE_COMPLETE:
+ local_event = PEER_REMOVE_COMPLETE_EVENT_ID;
+ break;
+
+ default:
+ /* event not implemented */
+ return 0;
+ }
+ return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
+}
+
+int wl12xx_process_mailbox_events(struct wl1271 *wl)
+{
+ struct wl12xx_event_mailbox *mbox = wl->mbox;
+ u32 vector;
+
+
+ vector = le32_to_cpu(mbox->events_vector);
+ vector &= ~(le32_to_cpu(mbox->events_mask));
+
+ wl1271_debug(DEBUG_EVENT, "MBOX vector: 0x%x", vector);
+
+ if (vector & SCAN_COMPLETE_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT, "status: 0x%x",
+ mbox->scheduled_scan_status);
+
+ if (wl->scan_wlvif)
+ wl12xx_scan_completed(wl, wl->scan_wlvif);
+ }
+
+ if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT,
+ "PERIODIC_SCAN_REPORT_EVENT (status 0x%0x)",
+ mbox->scheduled_scan_status);
+
+ wlcore_scan_sched_scan_results(wl);
+ }
+
+ if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID)
+ wlcore_event_sched_scan_completed(wl,
+ mbox->scheduled_scan_status);
+ if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
+ wlcore_event_soft_gemini_sense(wl,
+ mbox->soft_gemini_sense_info);
+
+ if (vector & BSS_LOSE_EVENT_ID)
+ wlcore_event_beacon_loss(wl, 0xff);
+
+ if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID)
+ wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric);
+
+ if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)
+ wlcore_event_ba_rx_constraint(wl,
+ BIT(mbox->role_id),
+ mbox->rx_ba_allowed);
+
+ if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID)
+ wlcore_event_channel_switch(wl, 0xff,
+ mbox->channel_switch_status);
+
+ if (vector & DUMMY_PACKET_EVENT_ID)
+ wlcore_event_dummy_packet(wl);
+
+ /*
+ * "TX retries exceeded" has a different meaning according to mode.
+ * In AP mode the offending station is disconnected.
+ */
+ if (vector & MAX_TX_RETRY_EVENT_ID)
+ wlcore_event_max_tx_failure(wl,
+ le16_to_cpu(mbox->sta_tx_retry_exceeded));
+
+ if (vector & INACTIVE_STA_EVENT_ID)
+ wlcore_event_inactive_sta(wl,
+ le16_to_cpu(mbox->sta_aging_status));
+
+ if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID)
+ wlcore_event_roc_complete(wl);
+
+ return 0;
+}
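
The new event code decodes a 32-bit vector of pending firmware events and services each set bit in turn. A minimal sketch of that dispatch idea, detached from the wlcore structures (all names below are hypothetical):

#include <linux/types.h>
#include <linux/bitops.h>

#define DEMO_SCAN_COMPLETE_EVENT	BIT(10)
#define DEMO_BSS_LOSE_EVENT		BIT(18)

static void demo_handle_scan_complete(void) { /* ... */ }
static void demo_handle_beacon_loss(void) { /* ... */ }

/* Sketch: walk the bits that are pending and not masked, one handler
 * per event. */
static void demo_process_events(u32 pending, u32 mask)
{
	u32 vector = pending & ~mask;

	if (vector & DEMO_SCAN_COMPLETE_EVENT)
		demo_handle_scan_complete();

	if (vector & DEMO_BSS_LOSE_EVENT)
		demo_handle_beacon_loss();
}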
diff --git a/drivers/net/wireless/ti/wl12xx/event.h b/drivers/net/wireless/ti/wl12xx/event.h
new file mode 100644
index 00000000000..a5cc3fcd9ee
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/event.h
@@ -0,0 +1,111 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_EVENT_H__
+#define __WL12XX_EVENT_H__
+
+#include "../wlcore/wlcore.h"
+
+enum {
+ MEASUREMENT_START_EVENT_ID = BIT(8),
+ MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
+ SCAN_COMPLETE_EVENT_ID = BIT(10),
+ WFD_DISCOVERY_COMPLETE_EVENT_ID = BIT(11),
+ AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12),
+ RESERVED1 = BIT(13),
+ PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14),
+ ROLE_STOP_COMPLETE_EVENT_ID = BIT(15),
+ RADAR_DETECTED_EVENT_ID = BIT(16),
+ CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
+ BSS_LOSE_EVENT_ID = BIT(18),
+ REGAINED_BSS_EVENT_ID = BIT(19),
+ MAX_TX_RETRY_EVENT_ID = BIT(20),
+ DUMMY_PACKET_EVENT_ID = BIT(21),
+ SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
+ CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID = BIT(23),
+ SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
+ PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25),
+ INACTIVE_STA_EVENT_ID = BIT(26),
+ PEER_REMOVE_COMPLETE_EVENT_ID = BIT(27),
+ PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28),
+ PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29),
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30),
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(31),
+};
+
+struct wl12xx_event_mailbox {
+ __le32 events_vector;
+ __le32 events_mask;
+ __le32 reserved_1;
+ __le32 reserved_2;
+
+ u8 number_of_scan_results;
+ u8 scan_tag;
+ u8 completed_scan_status;
+ u8 reserved_3;
+
+ u8 soft_gemini_sense_info;
+ u8 soft_gemini_protective_info;
+ s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
+ u8 change_auto_mode_timeout;
+ u8 scheduled_scan_status;
+ u8 reserved4;
+ /* tuned channel (roc) */
+ u8 roc_channel;
+
+ __le16 hlid_removed_bitmap;
+
+ /* bitmap of aged stations (by HLID) */
+ __le16 sta_aging_status;
+
+ /* bitmap of stations (by HLID) which exceeded max tx retries */
+ __le16 sta_tx_retry_exceeded;
+
+ /* discovery completed results */
+ u8 discovery_tag;
+ u8 number_of_preq_results;
+ u8 number_of_prsp_results;
+ u8 reserved_5;
+
+ /* rx ba constraint */
+ u8 role_id; /* 0xFF means any role. */
+ u8 rx_ba_allowed;
+ u8 reserved_6[2];
+
+ /* Channel switch results */
+
+ u8 channel_switch_role_id;
+ u8 channel_switch_status;
+ u8 reserved_7[2];
+
+ u8 ps_poll_delivery_failure_role_ids;
+ u8 stopped_role_ids;
+ u8 started_role_ids;
+
+ u8 reserved_8[9];
+} __packed;
+
+int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+ bool *timeout);
+int wl12xx_process_mailbox_events(struct wl1271 *wl);
+
+#endif
+
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index e5f5f8f3914..09694e39bb1 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -38,6 +38,8 @@
#include "reg.h"
#include "cmd.h"
#include "acx.h"
+#include "scan.h"
+#include "event.h"
#include "debugfs.h"
static char *fref_param;
@@ -208,6 +210,8 @@ static struct wlcore_conf wl12xx_conf = {
.tmpl_short_retry_limit = 10,
.tmpl_long_retry_limit = 10,
.tx_watchdog_timeout = 5000,
+ .slow_link_thold = 3,
+ .fast_link_thold = 10,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -265,8 +269,10 @@ static struct wlcore_conf wl12xx_conf = {
.scan = {
.min_dwell_time_active = 7500,
.max_dwell_time_active = 30000,
- .min_dwell_time_passive = 100000,
- .max_dwell_time_passive = 100000,
+ .min_dwell_time_active_long = 25000,
+ .max_dwell_time_active_long = 50000,
+ .dwell_time_passive = 100000,
+ .dwell_time_dfs = 150000,
.num_probe_reqs = 2,
.split_scan_timeout = 50000,
},
@@ -368,6 +374,10 @@ static struct wlcore_conf wl12xx_conf = {
.increase_time = 1,
.window_size = 16,
},
+ .recovery = {
+ .bug_on_recovery = 0,
+ .no_recovery = 0,
+ },
};
static struct wl12xx_priv_conf wl12xx_default_priv_conf = {
@@ -601,9 +611,9 @@ static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
{
int ret;
- if (wl->chip.id != CHIP_ID_1283_PG20) {
+ if (wl->chip.id != CHIP_ID_128X_PG20) {
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
- struct wl127x_rx_mem_pool_addr rx_mem_addr;
+ struct wl12xx_priv *priv = wl->priv;
/*
* Choose the block we want to read
@@ -612,13 +622,13 @@ static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
*/
u32 mem_block = rx_desc & RX_MEM_BLOCK_MASK;
- rx_mem_addr.addr = (mem_block << 8) +
+ priv->rx_mem_addr->addr = (mem_block << 8) +
le32_to_cpu(wl_mem_map->packet_memory_pool_start);
- rx_mem_addr.addr_extra = rx_mem_addr.addr + 4;
+ priv->rx_mem_addr->addr_extra = priv->rx_mem_addr->addr + 4;
- ret = wlcore_write(wl, WL1271_SLV_REG_DATA, &rx_mem_addr,
- sizeof(rx_mem_addr), false);
+ ret = wlcore_write(wl, WL1271_SLV_REG_DATA, priv->rx_mem_addr,
+ sizeof(*priv->rx_mem_addr), false);
if (ret < 0)
return ret;
}
@@ -631,13 +641,15 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
int ret = 0;
switch (wl->chip.id) {
- case CHIP_ID_1271_PG10:
+ case CHIP_ID_127X_PG10:
wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
wl->chip.id);
wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
- WLCORE_QUIRK_TKIP_HEADER_SPACE;
+ WLCORE_QUIRK_TKIP_HEADER_SPACE |
+ WLCORE_QUIRK_START_STA_FAILS |
+ WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
@@ -646,18 +658,22 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
/* read data preparation is only needed by wl127x */
wl->ops->prepare_read = wl127x_prepare_read;
- wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
- WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
- WL127X_MINOR_VER);
+ wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
+ WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
+ WL127X_SUBTYPE_SR_VER, WL127X_MINOR_SR_VER,
+ WL127X_IFTYPE_MR_VER, WL127X_MAJOR_MR_VER,
+ WL127X_SUBTYPE_MR_VER, WL127X_MINOR_MR_VER);
break;
- case CHIP_ID_1271_PG20:
+ case CHIP_ID_127X_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
- WLCORE_QUIRK_TKIP_HEADER_SPACE;
+ WLCORE_QUIRK_TKIP_HEADER_SPACE |
+ WLCORE_QUIRK_START_STA_FAILS |
+ WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wl->plt_fw_name = WL127X_PLT_FW_NAME;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
@@ -667,12 +683,14 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
/* read data preparation is only needed by wl127x */
wl->ops->prepare_read = wl127x_prepare_read;
- wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
- WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
- WL127X_MINOR_VER);
+ wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
+ WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
+ WL127X_SUBTYPE_SR_VER, WL127X_MINOR_SR_VER,
+ WL127X_IFTYPE_MR_VER, WL127X_MAJOR_MR_VER,
+ WL127X_SUBTYPE_MR_VER, WL127X_MINOR_MR_VER);
break;
- case CHIP_ID_1283_PG20:
+ case CHIP_ID_128X_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
wl->chip.id);
wl->plt_fw_name = WL128X_PLT_FW_NAME;
@@ -682,19 +700,29 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
/* wl128x requires TX blocksize alignment */
wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
- WLCORE_QUIRK_TKIP_HEADER_SPACE;
-
- wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER, WL128X_IFTYPE_VER,
- WL128X_MAJOR_VER, WL128X_SUBTYPE_VER,
- WL128X_MINOR_VER);
+ WLCORE_QUIRK_TKIP_HEADER_SPACE |
+ WLCORE_QUIRK_START_STA_FAILS |
+ WLCORE_QUIRK_AP_ZERO_SESSION_ID;
+
+ wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER,
+ WL128X_IFTYPE_SR_VER, WL128X_MAJOR_SR_VER,
+ WL128X_SUBTYPE_SR_VER, WL128X_MINOR_SR_VER,
+ WL128X_IFTYPE_MR_VER, WL128X_MAJOR_MR_VER,
+ WL128X_SUBTYPE_MR_VER, WL128X_MINOR_MR_VER);
break;
- case CHIP_ID_1283_PG10:
+ case CHIP_ID_128X_PG10:
default:
wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
ret = -ENODEV;
goto out;
}
+ /* common settings */
+ wl->scan_templ_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY;
+ wl->scan_templ_id_5 = CMD_TEMPL_APP_PROBE_REQ_5_LEGACY;
+ wl->sched_scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
+ wl->sched_scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
+ wl->max_channels_5 = WL12XX_MAX_CHANNELS_5GHZ;
out:
return ret;
}
@@ -1067,7 +1095,7 @@ static int wl12xx_pre_boot(struct wl1271 *wl)
u32 clk;
int selected_clock = -1;
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
ret = wl128x_boot_clk(wl, &selected_clock);
if (ret < 0)
goto out;
@@ -1098,7 +1126,7 @@ static int wl12xx_pre_boot(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
- if (wl->chip.id == CHIP_ID_1283_PG20)
+ if (wl->chip.id == CHIP_ID_128X_PG20)
clk |= ((selected_clock & 0x3) << 1) << 4;
else
clk |= (priv->ref_clock << 1) << 4;
@@ -1152,7 +1180,7 @@ static int wl12xx_pre_upload(struct wl1271 *wl)
/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
* to upload_fw) */
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
ret = wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
if (ret < 0)
goto out;
@@ -1219,6 +1247,23 @@ static int wl12xx_boot(struct wl1271 *wl)
if (ret < 0)
goto out;
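+	/* events the driver handles from the wl12xx firmware mailbox */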
+ wl->event_mask = BSS_LOSE_EVENT_ID |
+ REGAINED_BSS_EVENT_ID |
+ SCAN_COMPLETE_EVENT_ID |
+ ROLE_STOP_COMPLETE_EVENT_ID |
+ RSSI_SNR_TRIGGER_0_EVENT_ID |
+ PSPOLL_DELIVERY_FAILURE_EVENT_ID |
+ SOFT_GEMINI_SENSE_EVENT_ID |
+ PERIODIC_SCAN_REPORT_EVENT_ID |
+ PERIODIC_SCAN_COMPLETE_EVENT_ID |
+ DUMMY_PACKET_EVENT_ID |
+ PEER_REMOVE_COMPLETE_EVENT_ID |
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID |
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
+ INACTIVE_STA_EVENT_ID |
+ MAX_TX_RETRY_EVENT_ID |
+ CHANNEL_SWITCH_COMPLETE_EVENT_ID;
+
ret = wlcore_boot_run_firmware(wl);
if (ret < 0)
goto out;
@@ -1261,7 +1306,7 @@ static void
wl12xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
u32 blks, u32 spare_blks)
{
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
desc->wl128x_mem.total_mem_blocks = blks;
} else {
desc->wl127x_mem.extra_blocks = spare_blks;
@@ -1275,7 +1320,7 @@ wl12xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
{
u32 aligned_len = wlcore_calc_packet_alignment(wl, skb->len);
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
desc->length = cpu_to_le16(aligned_len >> 2);
@@ -1339,7 +1384,7 @@ static int wl12xx_hw_init(struct wl1271 *wl)
{
int ret;
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
ret = wl128x_cmd_general_parms(wl);
@@ -1394,22 +1439,6 @@ static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
return wlvif->rate_set;
}
-static int wl12xx_identify_fw(struct wl1271 *wl)
-{
- unsigned int *fw_ver = wl->chip.fw_ver;
-
- /* Only new station firmwares support routing fw logs to the host */
- if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
- (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
- wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
-
- /* This feature is not yet supported for AP mode */
- if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
- wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
-
- return 0;
-}
-
static void wl12xx_conf_init(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
@@ -1426,7 +1455,7 @@ static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
bool supported = false;
u8 major, minor;
- if (wl->chip.id == CHIP_ID_1283_PG20) {
+ if (wl->chip.id == CHIP_ID_128X_PG20) {
major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
@@ -1482,7 +1511,7 @@ static int wl12xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
u16 die_info;
int ret;
- if (wl->chip.id == CHIP_ID_1283_PG20)
+ if (wl->chip.id == CHIP_ID_128X_PG20)
ret = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1,
&die_info);
else
@@ -1589,16 +1618,46 @@ static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
return wlcore_set_key(wl, cmd, vif, sta, key_conf);
}
+static int wl12xx_set_peer_cap(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid)
+{
+ return wl1271_acx_set_ht_capabilities(wl, ht_cap, allow_ht_operation,
+ hlid);
+}
+
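+/*
+ * A link is high priority as long as its number of packets allocated in
+ * the FW is below the per-link threshold: fast links use the higher
+ * fast_link_thold, all other links the slow_link_thold.
+ */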
+static bool wl12xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ u8 thold;
+
+ if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map))
+ thold = wl->conf.tx.fast_link_thold;
+ else
+ thold = wl->conf.tx.slow_link_thold;
+
+ return lnk->allocated_pkts < thold;
+}
+
+static bool wl12xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ /* any link is good for low priority */
+ return true;
+}
+
static int wl12xx_setup(struct wl1271 *wl);
static struct wlcore_ops wl12xx_ops = {
.setup = wl12xx_setup,
.identify_chip = wl12xx_identify_chip,
- .identify_fw = wl12xx_identify_fw,
.boot = wl12xx_boot,
.plt_init = wl12xx_plt_init,
.trigger_cmd = wl12xx_trigger_cmd,
.ack_event = wl12xx_ack_event,
+ .wait_for_event = wl12xx_wait_for_event,
+ .process_mailbox_events = wl12xx_process_mailbox_events,
.calc_tx_blocks = wl12xx_calc_tx_blocks,
.set_tx_desc_blocks = wl12xx_set_tx_desc_blocks,
.set_tx_desc_data_len = wl12xx_set_tx_desc_data_len,
@@ -1615,9 +1674,17 @@ static struct wlcore_ops wl12xx_ops = {
.set_rx_csum = NULL,
.ap_get_mimo_wide_rate_mask = NULL,
.debugfs_init = wl12xx_debugfs_add_files,
+ .scan_start = wl12xx_scan_start,
+ .scan_stop = wl12xx_scan_stop,
+ .sched_scan_start = wl12xx_sched_scan_start,
+ .sched_scan_stop = wl12xx_scan_sched_scan_stop,
.get_spare_blocks = wl12xx_get_spare_blocks,
.set_key = wl12xx_set_key,
+ .channel_switch = wl12xx_cmd_channel_switch,
.pre_pkt_send = NULL,
+ .set_peer_cap = wl12xx_set_peer_cap,
+ .lnk_high_prio = wl12xx_lnk_high_prio,
+ .lnk_low_prio = wl12xx_lnk_low_prio,
};
static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
@@ -1636,11 +1703,13 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
- struct wl12xx_platform_data *pdata = wl->pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data;
+ struct wl12xx_platform_data *pdata = pdev_data->pdata;
wl->rtable = wl12xx_rtable;
wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
+ wl->num_channels = 1;
wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
@@ -1693,6 +1762,10 @@ static int wl12xx_setup(struct wl1271 *wl)
wl1271_error("Invalid tcxo parameter %s", tcxo_param);
}
+ priv->rx_mem_addr = kmalloc(sizeof(*priv->rx_mem_addr), GFP_KERNEL);
+ if (!priv->rx_mem_addr)
+ return -ENOMEM;
+
return 0;
}
@@ -1703,7 +1776,8 @@ static int wl12xx_probe(struct platform_device *pdev)
int ret;
hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv),
- WL12XX_AGGR_BUFFER_SIZE);
+ WL12XX_AGGR_BUFFER_SIZE,
+ sizeof(struct wl12xx_event_mailbox));
if (IS_ERR(hw)) {
wl1271_error("can't allocate hw");
ret = PTR_ERR(hw);
@@ -1725,6 +1799,21 @@ out:
return ret;
}
+static int wl12xx_remove(struct platform_device *pdev)
+{
+ struct wl1271 *wl = platform_get_drvdata(pdev);
+ struct wl12xx_priv *priv;
+
+ if (!wl)
+ goto out;
+ priv = wl->priv;
+
+ kfree(priv->rx_mem_addr);
+
+out:
+ return wlcore_remove(pdev);
+}
+
static const struct platform_device_id wl12xx_id_table[] = {
{ "wl12xx", 0 },
{ } /* Terminating Entry */
@@ -1733,7 +1822,7 @@ MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
static struct platform_driver wl12xx_driver = {
.probe = wl12xx_probe,
- .remove = wlcore_remove,
+ .remove = wl12xx_remove,
.id_table = wl12xx_id_table,
.driver = {
.name = "wl12xx_driver",
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
new file mode 100644
index 00000000000..affdb3ec622
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -0,0 +1,501 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/ieee80211.h>
+#include "scan.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/tx.h"
+
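+/*
+ * Fill @channels with the channels from @req that belong to @band, are not
+ * disabled and have not been scanned yet; returns the number of entries
+ * written (at most WL1271_SCAN_MAX_CHANNELS).
+ */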
+static int wl1271_get_scan_channels(struct wl1271 *wl,
+ struct cfg80211_scan_request *req,
+ struct basic_scan_channel_params *channels,
+ enum ieee80211_band band, bool passive)
+{
+ struct conf_scan_settings *c = &wl->conf.scan;
+ int i, j;
+ u32 flags;
+
+ for (i = 0, j = 0;
+ i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
+ i++) {
+ flags = req->channels[i]->flags;
+
+ if (!test_bit(i, wl->scan.scanned_ch) &&
+ !(flags & IEEE80211_CHAN_DISABLED) &&
+ (req->channels[i]->band == band) &&
+ /*
+ * In passive scans, we scan all remaining
+ * channels, even if not marked as such.
+ * In active scans, we only scan channels not
+ * marked as passive.
+ */
+ (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
+ wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
+ req->channels[i]->band,
+ req->channels[i]->center_freq);
+ wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
+ req->channels[i]->hw_value,
+ req->channels[i]->flags);
+ wl1271_debug(DEBUG_SCAN,
+ "max_antenna_gain %d, max_power %d",
+ req->channels[i]->max_antenna_gain,
+ req->channels[i]->max_power);
+ wl1271_debug(DEBUG_SCAN, "beacon_found %d",
+ req->channels[i]->beacon_found);
+
+ if (!passive) {
+ channels[j].min_duration =
+ cpu_to_le32(c->min_dwell_time_active);
+ channels[j].max_duration =
+ cpu_to_le32(c->max_dwell_time_active);
+ } else {
+ channels[j].min_duration =
+ cpu_to_le32(c->dwell_time_passive);
+ channels[j].max_duration =
+ cpu_to_le32(c->dwell_time_passive);
+ }
+ channels[j].early_termination = 0;
+ channels[j].tx_power_att = req->channels[i]->max_power;
+ channels[j].channel = req->channels[i]->hw_value;
+
+ memset(&channels[j].bssid_lsb, 0xff, 4);
+ memset(&channels[j].bssid_msb, 0xff, 2);
+
+ /* Mark the channels we already used */
+ set_bit(i, wl->scan.scanned_ch);
+
+ j++;
+ }
+ }
+
+ return j;
+}
+
+#define WL1271_NOTHING_TO_SCAN 1
+
+static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum ieee80211_band band,
+ bool passive, u32 basic_rate)
+{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct wl1271_cmd_scan *cmd;
+ struct wl1271_cmd_trigger_scan_to *trigger;
+ int ret;
+ u16 scan_options = 0;
+
+ /* skip active scans if we don't have SSIDs */
+ if (!passive && wl->scan.req->n_ssids == 0)
+ return WL1271_NOTHING_TO_SCAN;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
+ if (!cmd || !trigger) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (wl->conf.scan.split_scan_timeout)
+ scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN;
+
+ if (passive)
+ scan_options |= WL1271_SCAN_OPT_PASSIVE;
+
+ cmd->params.role_id = wlvif->role_id;
+
+ if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd->params.scan_options = cpu_to_le16(scan_options);
+
+ cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
+ cmd->channels,
+ band, passive);
+ if (cmd->params.n_ch == 0) {
+ ret = WL1271_NOTHING_TO_SCAN;
+ goto out;
+ }
+
+ cmd->params.tx_rate = cpu_to_le32(basic_rate);
+ cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
+ cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
+ cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
+
+ if (band == IEEE80211_BAND_2GHZ)
+ cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
+ else
+ cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
+
+ if (wl->scan.ssid_len && wl->scan.ssid) {
+ cmd->params.ssid_len = wl->scan.ssid_len;
+ memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
+ }
+
+ memcpy(cmd->addr, vif->addr, ETH_ALEN);
+
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->params.role_id, band,
+ wl->scan.ssid, wl->scan.ssid_len,
+ wl->scan.req->ie,
+ wl->scan.req->ie_len, false);
+ if (ret < 0) {
+ wl1271_error("PROBE request template failed");
+ goto out;
+ }
+
+ trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout);
+ ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
+ sizeof(*trigger), 0);
+ if (ret < 0) {
+ wl1271_error("trigger scan to failed for hw scan");
+ goto out;
+ }
+
+ wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("SCAN failed");
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ kfree(trigger);
+ return ret;
+}
+
+int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ struct wl1271_cmd_header *cmd = NULL;
+ int ret = 0;
+
+ if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE))
+ return -EINVAL;
+
+ wl1271_debug(DEBUG_CMD, "cmd scan stop");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd,
+ sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("cmd stop_scan failed");
+ goto out;
+ }
+out:
+ kfree(cmd);
+ return ret;
+}
+
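+/*
+ * One-shot scan state machine: 2.4GHz active -> 2.4GHz passive ->
+ * 5GHz active -> 5GHz passive -> done (the 5GHz states are skipped when
+ * 11a is disabled). Each state sends a separate SCAN command to the FW.
+ */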
+void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret = 0;
+ enum ieee80211_band band;
+ u32 rate, mask;
+
+ switch (wl->scan.state) {
+ case WL1271_SCAN_STATE_IDLE:
+ break;
+
+ case WL1271_SCAN_STATE_2GHZ_ACTIVE:
+ band = IEEE80211_BAND_2GHZ;
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, wlvif, band, false, rate);
+ if (ret == WL1271_NOTHING_TO_SCAN) {
+ wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
+ wl1271_scan_stm(wl, wlvif);
+ }
+
+ break;
+
+ case WL1271_SCAN_STATE_2GHZ_PASSIVE:
+ band = IEEE80211_BAND_2GHZ;
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, wlvif, band, true, rate);
+ if (ret == WL1271_NOTHING_TO_SCAN) {
+ if (wl->enable_11a)
+ wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
+ else
+ wl->scan.state = WL1271_SCAN_STATE_DONE;
+ wl1271_scan_stm(wl, wlvif);
+ }
+
+ break;
+
+ case WL1271_SCAN_STATE_5GHZ_ACTIVE:
+ band = IEEE80211_BAND_5GHZ;
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, wlvif, band, false, rate);
+ if (ret == WL1271_NOTHING_TO_SCAN) {
+ wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
+ wl1271_scan_stm(wl, wlvif);
+ }
+
+ break;
+
+ case WL1271_SCAN_STATE_5GHZ_PASSIVE:
+ band = IEEE80211_BAND_5GHZ;
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, wlvif, band, true, rate);
+ if (ret == WL1271_NOTHING_TO_SCAN) {
+ wl->scan.state = WL1271_SCAN_STATE_DONE;
+ wl1271_scan_stm(wl, wlvif);
+ }
+
+ break;
+
+ case WL1271_SCAN_STATE_DONE:
+ wl->scan.failed = false;
+ cancel_delayed_work(&wl->scan_complete_work);
+ ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+ msecs_to_jiffies(0));
+ break;
+
+ default:
+ wl1271_error("invalid scan state");
+ break;
+ }
+
+ if (ret < 0) {
+ cancel_delayed_work(&wl->scan_complete_work);
+ ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+ msecs_to_jiffies(0));
+ }
+}
+
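+/*
+ * Copy the generic wlcore channel lists into the wl12xx-specific sched
+ * scan command layout.
+ */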
+static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
+ struct wlcore_scan_channels *cmd_channels)
+{
+ memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive));
+ memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active));
+ cmd->dfs = cmd_channels->dfs;
+ cmd->n_pactive_ch = cmd_channels->passive_active;
+
+ memcpy(cmd->channels_2, cmd_channels->channels_2,
+ sizeof(cmd->channels_2));
+ memcpy(cmd->channels_5, cmd_channels->channels_5,
+	       sizeof(cmd->channels_5));
+ /* channels_4 are not supported, so no need to copy them */
+}
+
+int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ struct wl1271_cmd_sched_scan_config *cfg = NULL;
+ struct wlcore_scan_channels *cfg_channels = NULL;
+ struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
+ int i, ret;
+ bool force_passive = !req->n_ssids;
+
+ wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ cfg->role_id = wlvif->role_id;
+ cfg->rssi_threshold = c->rssi_threshold;
+ cfg->snr_threshold = c->snr_threshold;
+ cfg->n_probe_reqs = c->num_probe_reqs;
+	/* cycles set to 0 means infinite (until manually stopped) */
+ cfg->cycles = 0;
+ /* report APs when at least 1 is found */
+ cfg->report_after = 1;
+ /* don't stop scanning automatically when something is found */
+ cfg->terminate = 0;
+ cfg->tag = WL1271_SCAN_DEFAULT_TAG;
+ /* don't filter on BSS type */
+ cfg->bss_type = SCAN_BSS_TYPE_ANY;
+ /* currently NL80211 supports only a single interval */
+ for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
+ cfg->intervals[i] = cpu_to_le32(req->interval);
+
+ cfg->ssid_len = 0;
+ ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
+ if (ret < 0)
+ goto out;
+
+ cfg->filter_type = ret;
+
+ wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type);
+
+ cfg_channels = kzalloc(sizeof(*cfg_channels), GFP_KERNEL);
+ if (!cfg_channels) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!wlcore_set_scan_chan_params(wl, cfg_channels, req->channels,
+ req->n_channels, req->n_ssids,
+ SCAN_TYPE_PERIODIC)) {
+ wl1271_error("scan channel list is empty");
+ ret = -EINVAL;
+ goto out;
+ }
+ wl12xx_adjust_channels(cfg, cfg_channels);
+
+ if (!force_passive && cfg->active[0]) {
+ u8 band = IEEE80211_BAND_2GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ wlvif->role_id, band,
+ req->ssids[0].ssid,
+ req->ssids[0].ssid_len,
+ ies->ie[band],
+ ies->len[band], true);
+ if (ret < 0) {
+ wl1271_error("2.4GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ if (!force_passive && cfg->active[1]) {
+ u8 band = IEEE80211_BAND_5GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ wlvif->role_id, band,
+ req->ssids[0].ssid,
+ req->ssids[0].ssid_len,
+ ies->ie[band],
+ ies->len[band], true);
+ if (ret < 0) {
+ wl1271_error("5GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg));
+
+ ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg,
+ sizeof(*cfg), 0);
+ if (ret < 0) {
+ wl1271_error("SCAN configuration failed");
+ goto out;
+ }
+out:
+ kfree(cfg_channels);
+ kfree(cfg);
+ return ret;
+}
+
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ struct wl1271_cmd_sched_scan_start *start;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
+
+ if (wlvif->bss_type != BSS_TYPE_STA_BSS)
+ return -EOPNOTSUPP;
+
+ if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ return -EBUSY;
+
+ start = kzalloc(sizeof(*start), GFP_KERNEL);
+ if (!start)
+ return -ENOMEM;
+
+ start->role_id = wlvif->role_id;
+ start->tag = WL1271_SCAN_DEFAULT_TAG;
+
+ ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
+ sizeof(*start), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send scan start command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(start);
+ return ret;
+}
+
+int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ int ret;
+
+ ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
+ if (ret < 0)
+ return ret;
+
+ return wl1271_scan_sched_scan_start(wl, wlvif);
+}
+
+void wl12xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ struct wl1271_cmd_sched_scan_stop *stop;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
+
+ /* FIXME: what to do if alloc'ing to stop fails? */
+ stop = kzalloc(sizeof(*stop), GFP_KERNEL);
+ if (!stop) {
+ wl1271_error("failed to alloc memory to send sched scan stop");
+ return;
+ }
+
+ stop->role_id = wlvif->role_id;
+ stop->tag = WL1271_SCAN_DEFAULT_TAG;
+
+ ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
+ sizeof(*stop), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send sched scan stop command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(stop);
+}
+
+int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req)
+{
+ wl1271_scan_stm(wl, wlvif);
+ return 0;
+}
+
+void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ wl1271_scan_stm(wl, wlvif);
+}
diff --git a/drivers/net/wireless/ti/wl12xx/scan.h b/drivers/net/wireless/ti/wl12xx/scan.h
new file mode 100644
index 00000000000..264af7ac278
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/scan.h
@@ -0,0 +1,140 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_SCAN_H__
+#define __WL12XX_SCAN_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/scan.h"
+
+#define WL12XX_MAX_CHANNELS_5GHZ 23
+
+struct basic_scan_params {
+ /* Scan option flags (WL1271_SCAN_OPT_*) */
+ __le16 scan_options;
+ u8 role_id;
+ /* Number of scan channels in the list (maximum 30) */
+ u8 n_ch;
+ /* This field indicates the number of probe requests to send
+ per channel for an active scan */
+ u8 n_probe_reqs;
+ u8 tid_trigger;
+ u8 ssid_len;
+ u8 use_ssid_list;
+
+ /* Rate bit field for sending the probes */
+ __le32 tx_rate;
+
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ /* Band to scan */
+ u8 band;
+
+ u8 scan_tag;
+ u8 padding2[2];
+} __packed;
+
+struct basic_scan_channel_params {
+ /* Duration in TU to wait for frames on a channel for active scan */
+ __le32 min_duration;
+ __le32 max_duration;
+ __le32 bssid_lsb;
+ __le16 bssid_msb;
+ u8 early_termination;
+ u8 tx_power_att;
+ u8 channel;
+ /* FW internal use only! */
+ u8 dfs_candidate;
+ u8 activity_detected;
+ u8 pad;
+} __packed;
+
+struct wl1271_cmd_scan {
+ struct wl1271_cmd_header header;
+
+ struct basic_scan_params params;
+ struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
+
+ /* src mac address */
+ u8 addr[ETH_ALEN];
+ u8 padding[2];
+} __packed;
+
+struct wl1271_cmd_sched_scan_config {
+ struct wl1271_cmd_header header;
+
+ __le32 intervals[SCAN_MAX_CYCLE_INTERVALS];
+
+ s8 rssi_threshold; /* for filtering (in dBm) */
+ s8 snr_threshold; /* for filtering (in dB) */
+
+ u8 cycles; /* maximum number of scan cycles */
+	u8 report_after; /* report when this number of results is received */
+ u8 terminate; /* stop scanning after reporting */
+
+ u8 tag;
+ u8 bss_type; /* for filtering */
+ u8 filter_type;
+
+ u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+
+	u8 n_probe_reqs; /* Number of probe requests per channel */
+
+ u8 passive[SCAN_MAX_BANDS];
+ u8 active[SCAN_MAX_BANDS];
+
+ u8 dfs;
+
+ u8 n_pactive_ch; /* number of pactive (passive until fw detects energy)
+ channels in BG band */
+ u8 role_id;
+ u8 padding[1];
+ struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
+ struct conn_scan_ch_params channels_5[WL12XX_MAX_CHANNELS_5GHZ];
+ struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
+} __packed;
+
+struct wl1271_cmd_sched_scan_start {
+ struct wl1271_cmd_header header;
+
+ u8 tag;
+ u8 role_id;
+ u8 padding[2];
+} __packed;
+
+struct wl1271_cmd_sched_scan_stop {
+ struct wl1271_cmd_header header;
+
+ u8 tag;
+ u8 role_id;
+ u8 padding[2];
+} __packed;
+
+int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req);
+int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+void wl12xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+#endif
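
For orientation, a minimal sketch of how the scan entry points declared above are reached at runtime: wlcore keeps them in the per-chip ops table and calls through a thin wrapper. The wrapper name below is illustrative and not part of this patch; only wl->ops->scan_start and the wl12xx_scan_start() signature are taken from the code.

/* sketch only, assuming the wlcore headers are in scope */
static inline int example_hw_scan_start(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					struct cfg80211_scan_request *req)
{
	/* the op is filled per chip family, e.g. .scan_start = wl12xx_scan_start */
	if (!wl->ops->scan_start)
		return -EOPNOTSUPP;

	return wl->ops->scan_start(wl, wlvif, req);
}
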
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 7182bbf6625..d4552857480 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -24,19 +24,37 @@
#include "conf.h"
-/* minimum FW required for driver for wl127x */
+/* WiLink 6/7 chip IDs */
+#define CHIP_ID_127X_PG10 (0x04030101)
+#define CHIP_ID_127X_PG20 (0x04030111)
+#define CHIP_ID_128X_PG10 (0x05030101)
+#define CHIP_ID_128X_PG20 (0x05030111)
+
+/* FW chip version for wl127x */
#define WL127X_CHIP_VER 6
-#define WL127X_IFTYPE_VER 3
-#define WL127X_MAJOR_VER 10
-#define WL127X_SUBTYPE_VER 2
-#define WL127X_MINOR_VER 115
+/* minimum single-role FW version for wl127x */
+#define WL127X_IFTYPE_SR_VER 3
+#define WL127X_MAJOR_SR_VER 10
+#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
+#define WL127X_MINOR_SR_VER 115
+/* minimum multi-role FW version for wl127x */
+#define WL127X_IFTYPE_MR_VER 5
+#define WL127X_MAJOR_MR_VER 7
+#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE
+#define WL127X_MINOR_MR_VER 115
-/* minimum FW required for driver for wl128x */
+/* FW chip version for wl128x */
#define WL128X_CHIP_VER 7
-#define WL128X_IFTYPE_VER 3
-#define WL128X_MAJOR_VER 10
-#define WL128X_SUBTYPE_VER 2
-#define WL128X_MINOR_VER 115
+/* minimum single-role FW version for wl128x */
+#define WL128X_IFTYPE_SR_VER 3
+#define WL128X_MAJOR_SR_VER 10
+#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
+#define WL128X_MINOR_SR_VER 115
+/* minimum multi-role FW version for wl128x */
+#define WL128X_IFTYPE_MR_VER 5
+#define WL128X_MAJOR_MR_VER 7
+#define WL128X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE
+#define WL128X_MINOR_MR_VER 42
#define WL12XX_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -55,6 +73,8 @@ struct wl12xx_priv {
int ref_clock;
int tcxo_clock;
+
+ struct wl127x_rx_mem_pool_addr *rx_mem_addr;
};
#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/Makefile b/drivers/net/wireless/ti/wl18xx/Makefile
index 67c098734c7..ae2b8173578 100644
--- a/drivers/net/wireless/ti/wl18xx/Makefile
+++ b/drivers/net/wireless/ti/wl18xx/Makefile
@@ -1,3 +1,3 @@
-wl18xx-objs = main.o acx.o tx.o io.o debugfs.o
+wl18xx-objs = main.o acx.o tx.o io.o debugfs.o scan.o cmd.o event.o
obj-$(CONFIG_WL18XX) += wl18xx.o
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
index 72840e23bf5..a169bb5a5db 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.c
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -75,7 +75,7 @@ int wl18xx_acx_set_checksum_state(struct wl1271 *wl)
acx->checksum_state = CHECKSUM_OFFLOAD_ENABLED;
- ret = wl1271_cmd_configure(wl, ACX_CHECKSUM_CONFIG, acx, sizeof(*acx));
+ ret = wl1271_cmd_configure(wl, ACX_CSUM_CONFIG, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("failed to set Tx checksum state: %d", ret);
goto out;
@@ -109,3 +109,88 @@ out:
kfree(acx);
return ret;
}
+
+int wl18xx_acx_peer_ht_operation_mode(struct wl1271 *wl, u8 hlid, bool wide)
+{
+ struct wlcore_peer_ht_operation_mode *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx peer ht operation mode hlid %d bw %d",
+ hlid, wide);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->hlid = hlid;
+ acx->bandwidth = wide ? WLCORE_BANDWIDTH_40MHZ : WLCORE_BANDWIDTH_20MHZ;
+
+ ret = wl1271_cmd_configure(wl, ACX_PEER_HT_OPERATION_MODE_CFG, acx,
+ sizeof(*acx));
+
+ if (ret < 0) {
+ wl1271_warning("acx peer ht operation mode failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+
+}
+
+/*
+ * This command is basically the same as wl1271_acx_ht_capabilities,
+ * with the addition of supported rates. They should be unified in
+ * the next FW API change.
+ */
+int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid)
+{
+ struct wlcore_acx_peer_cap *acx;
+ int ret = 0;
+ u32 ht_capabilites = 0;
+
+ wl1271_debug(DEBUG_ACX,
+ "acx set cap ht_supp: %d ht_cap: %d rates: 0x%x",
+ ht_cap->ht_supported, ht_cap->cap, rate_set);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (allow_ht_operation && ht_cap->ht_supported) {
+ /* no need to translate capabilities - use the spec values */
+ ht_capabilites = ht_cap->cap;
+
+ /*
+		 * this bit is not defined by the spec and is only used by
+		 * the FW to indicate peer HT support
+ */
+ ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION;
+
+ /* get data from A-MPDU parameters field */
+ acx->ampdu_max_length = ht_cap->ampdu_factor;
+ acx->ampdu_min_spacing = ht_cap->ampdu_density;
+ }
+
+ acx->hlid = hlid;
+ acx->ht_capabilites = cpu_to_le32(ht_capabilites);
+ acx->supported_rates = cpu_to_le32(rate_set);
+
+ ret = wl1271_cmd_configure(wl, ACX_PEER_CAP, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ht capabilities setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
index e2609a6b734..0e636def121 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.h
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -26,7 +26,13 @@
#include "../wlcore/acx.h"
enum {
- ACX_CLEAR_STATISTICS = 0x0047,
+ ACX_NS_IPV6_FILTER = 0x0050,
+ ACX_PEER_HT_OPERATION_MODE_CFG = 0x0051,
+ ACX_CSUM_CONFIG = 0x0052,
+ ACX_SIM_CONFIG = 0x0053,
+ ACX_CLEAR_STATISTICS = 0x0054,
+ ACX_AUTO_RX_STREAMING = 0x0055,
+ ACX_PEER_CAP = 0x0056
};
/* number of bits the length field takes (add 1 for the actual number) */
@@ -278,10 +284,57 @@ struct wl18xx_acx_clear_statistics {
struct acx_header header;
};
+enum wlcore_bandwidth {
+ WLCORE_BANDWIDTH_20MHZ,
+ WLCORE_BANDWIDTH_40MHZ,
+};
+
+struct wlcore_peer_ht_operation_mode {
+ struct acx_header header;
+
+ u8 hlid;
+ u8 bandwidth; /* enum wlcore_bandwidth */
+ u8 padding[2];
+};
+
+/*
+ * ACX_PEER_CAP
+ * this struct is very similar to wl1271_acx_ht_capabilities, with the
+ * addition of supported rates
+ */
+struct wlcore_acx_peer_cap {
+ struct acx_header header;
+
+ /* bitmask of capability bits supported by the peer */
+ __le32 ht_capabilites;
+
+ /* rates supported by the remote peer */
+ __le32 supported_rates;
+
+ /* Indicates to which link these capabilities apply. */
+ u8 hlid;
+
+ /*
+	 * This is the maximum A-MPDU length supported by the AP. The FW may
+	 * not exceed this length when sending A-MPDUs.
+ */
+ u8 ampdu_max_length;
+
+	/* This is the minimal spacing required when sending A-MPDUs to the AP */
+ u8 ampdu_min_spacing;
+
+ u8 padding;
+} __packed;
+
int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
u32 sdio_blk_size, u32 extra_mem_blks,
u32 len_field_size);
int wl18xx_acx_set_checksum_state(struct wl1271 *wl);
int wl18xx_acx_clear_statistics(struct wl1271 *wl);
+int wl18xx_acx_peer_ht_operation_mode(struct wl1271 *wl, u8 hlid, bool wide);
+int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid);
#endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
new file mode 100644
index 00000000000..1d1f6cc7a50
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/cmd.c
@@ -0,0 +1,80 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/hw_ops.h"
+
+#include "cmd.h"
+
+int wl18xx_cmd_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct wl18xx_cmd_channel_switch *cmd;
+ u32 supported_rates;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "cmd channel switch");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->role_id = wlvif->role_id;
+ cmd->channel = ch_switch->channel->hw_value;
+ cmd->switch_time = ch_switch->count;
+ cmd->stop_tx = ch_switch->block_tx;
+
+ switch (ch_switch->channel->band) {
+ case IEEE80211_BAND_2GHZ:
+ cmd->band = WLCORE_BAND_2_4GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ cmd->band = WLCORE_BAND_5GHZ;
+ break;
+ default:
+ wl1271_error("invalid channel switch band: %d",
+ ch_switch->channel->band);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
+ wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
+ if (wlvif->p2p)
+ supported_rates &= ~CONF_TX_CCK_RATES;
+ cmd->local_supported_rates = cpu_to_le32(supported_rates);
+ cmd->channel_type = wlvif->channel_type;
+
+ ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send channel switch command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.h b/drivers/net/wireless/ti/wl18xx/cmd.h
new file mode 100644
index 00000000000..6687d10899a
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/cmd.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_CMD_H__
+#define __WL18XX_CMD_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/acx.h"
+
+struct wl18xx_cmd_channel_switch {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+
+ /* The new serving channel */
+ u8 channel;
+ /* Relative time of the serving channel switch in TBTT units */
+ u8 switch_time;
+ /* Stop the role TX, should expect it after radar detection */
+ u8 stop_tx;
+
+ __le32 local_supported_rates;
+
+ u8 channel_type;
+ u8 band;
+
+ u8 padding[2];
+} __packed;
+
+int wl18xx_cmd_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_channel_switch *ch_switch);
+
+#endif
diff --git a/drivers/net/wireless/ti/wl18xx/conf.h b/drivers/net/wireless/ti/wl18xx/conf.h
index 4d426cc2027..e34302e3b51 100644
--- a/drivers/net/wireless/ti/wl18xx/conf.h
+++ b/drivers/net/wireless/ti/wl18xx/conf.h
@@ -23,20 +23,21 @@
#define __WL18XX_CONF_H__
#define WL18XX_CONF_MAGIC 0x10e100ca
-#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0003)
+#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0006)
#define WL18XX_CONF_MASK 0x0000ffff
#define WL18XX_CONF_SIZE (WLCORE_CONF_SIZE + \
sizeof(struct wl18xx_priv_conf))
#define NUM_OF_CHANNELS_11_ABG 150
#define NUM_OF_CHANNELS_11_P 7
-#define WL18XX_NUM_OF_SUB_BANDS 9
#define SRF_TABLE_LEN 16
#define PIN_MUXING_SIZE 2
+#define WL18XX_TRACE_LOSS_GAPS_TX 10
+#define WL18XX_TRACE_LOSS_GAPS_RX 18
struct wl18xx_mac_and_phy_params {
u8 phy_standalone;
- u8 rdl;
+ u8 spare0;
u8 enable_clpc;
u8 enable_tx_low_pwr_on_siso_rdl;
u8 auto_detect;
@@ -69,18 +70,27 @@ struct wl18xx_mac_and_phy_params {
u8 pwr_limit_reference_11_abg;
u8 per_chan_pwr_limit_arr_11p[NUM_OF_CHANNELS_11_P];
u8 pwr_limit_reference_11p;
- u8 per_sub_band_tx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
- u8 per_sub_band_rx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
+ u8 spare1;
+ u8 per_chan_bo_mode_11_abg[13];
+ u8 per_chan_bo_mode_11_p[4];
u8 primary_clock_setting_time;
u8 clock_valid_on_wake_up;
u8 secondary_clock_setting_time;
u8 board_type;
/* enable point saturation */
u8 psat;
- /* low/medium/high Tx power in dBm */
+ /* low/medium/high Tx power in dBm for STA-HP BG */
s8 low_power_val;
s8 med_power_val;
s8 high_power_val;
+ s8 per_sub_band_tx_trace_loss[WL18XX_TRACE_LOSS_GAPS_TX];
+ s8 per_sub_band_rx_trace_loss[WL18XX_TRACE_LOSS_GAPS_RX];
+ u8 tx_rf_margin;
+ /* low/medium/high Tx power in dBm for other role */
+ s8 low_power_val_2nd;
+ s8 med_power_val_2nd;
+ s8 high_power_val_2nd;
+
u8 padding[1];
} __packed;
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
new file mode 100644
index 00000000000..c9199d7804c
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -0,0 +1,111 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "event.h"
+#include "scan.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+
+int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+ bool *timeout)
+{
+ u32 local_event;
+
+ switch (event) {
+ case WLCORE_EVENT_PEER_REMOVE_COMPLETE:
+ local_event = PEER_REMOVE_COMPLETE_EVENT_ID;
+ break;
+
+ case WLCORE_EVENT_DFS_CONFIG_COMPLETE:
+ local_event = DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
+ break;
+
+ default:
+ /* event not implemented */
+ return 0;
+ }
+ return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
+}
+
+int wl18xx_process_mailbox_events(struct wl1271 *wl)
+{
+ struct wl18xx_event_mailbox *mbox = wl->mbox;
+ u32 vector;
+
+ vector = le32_to_cpu(mbox->events_vector);
+ wl1271_debug(DEBUG_EVENT, "MBOX vector: 0x%x", vector);
+
+ if (vector & SCAN_COMPLETE_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT, "scan results: %d",
+ mbox->number_of_scan_results);
+
+ if (wl->scan_wlvif)
+ wl18xx_scan_completed(wl, wl->scan_wlvif);
+ }
+
+ if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT,
+ "PERIODIC_SCAN_REPORT_EVENT (results %d)",
+ mbox->number_of_sched_scan_results);
+
+ wlcore_scan_sched_scan_results(wl);
+ }
+
+ if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID)
+ wlcore_event_sched_scan_completed(wl, 1);
+
+ if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID)
+ wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric);
+
+ if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)
+ wlcore_event_ba_rx_constraint(wl,
+ le16_to_cpu(mbox->rx_ba_role_id_bitmap),
+ le16_to_cpu(mbox->rx_ba_allowed_bitmap));
+
+ if (vector & BSS_LOSS_EVENT_ID)
+ wlcore_event_beacon_loss(wl,
+ le16_to_cpu(mbox->bss_loss_bitmap));
+
+ if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID)
+ wlcore_event_channel_switch(wl,
+ le16_to_cpu(mbox->channel_switch_role_id_bitmap),
+ true);
+
+ if (vector & DUMMY_PACKET_EVENT_ID)
+ wlcore_event_dummy_packet(wl);
+
+ /*
+ * "TX retries exceeded" has a different meaning according to mode.
+ * In AP mode the offending station is disconnected.
+ */
+ if (vector & MAX_TX_FAILURE_EVENT_ID)
+ wlcore_event_max_tx_failure(wl,
+ le32_to_cpu(mbox->tx_retry_exceeded_bitmap));
+
+ if (vector & INACTIVE_STA_EVENT_ID)
+ wlcore_event_inactive_sta(wl,
+ le32_to_cpu(mbox->inactive_sta_bitmap));
+
+ if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID)
+ wlcore_event_roc_complete(wl);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
new file mode 100644
index 00000000000..398f3d2c0a6
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -0,0 +1,77 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_EVENT_H__
+#define __WL18XX_EVENT_H__
+
+#include "../wlcore/wlcore.h"
+
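+/* bits of the events_vector field in struct wl18xx_event_mailbox */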
+enum {
+ SCAN_COMPLETE_EVENT_ID = BIT(8),
+ RADAR_DETECTED_EVENT_ID = BIT(9),
+ CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(10),
+ BSS_LOSS_EVENT_ID = BIT(11),
+ MAX_TX_FAILURE_EVENT_ID = BIT(12),
+ DUMMY_PACKET_EVENT_ID = BIT(13),
+ INACTIVE_STA_EVENT_ID = BIT(14),
+ PEER_REMOVE_COMPLETE_EVENT_ID = BIT(15),
+ PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(16),
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(17),
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18),
+ DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19),
+ PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20),
+};
+
+struct wl18xx_event_mailbox {
+ __le32 events_vector;
+
+ u8 number_of_scan_results;
+ u8 number_of_sched_scan_results;
+
+ __le16 channel_switch_role_id_bitmap;
+
+ s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
+
+ /* bitmap of removed links */
+ __le32 hlid_removed_bitmap;
+
+ /* rx ba constraint */
+ __le16 rx_ba_role_id_bitmap; /* 0xfff means any role. */
+ __le16 rx_ba_allowed_bitmap;
+
+ /* bitmap of roc completed (by role id) */
+ __le16 roc_completed_bitmap;
+
+ /* bitmap of stations (by role id) with bss loss */
+ __le16 bss_loss_bitmap;
+
+ /* bitmap of stations (by HLID) which exceeded max tx retries */
+ __le32 tx_retry_exceeded_bitmap;
+
+ /* bitmap of inactive stations (by HLID) */
+ __le32 inactive_sta_bitmap;
+} __packed;
+
+int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
+ bool *timeout);
+int wl18xx_process_mailbox_events(struct wl1271 *wl);
+
+#endif
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 8d8c1f8c63b..da3ef1b10a9 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -34,10 +34,13 @@
#include "reg.h"
#include "conf.h"
+#include "cmd.h"
#include "acx.h"
#include "tx.h"
#include "wl18xx.h"
#include "io.h"
+#include "scan.h"
+#include "event.h"
#include "debugfs.h"
#define WL18XX_RX_CHECKSUM_MASK 0x40
@@ -334,6 +337,8 @@ static struct wlcore_conf wl18xx_conf = {
.tmpl_short_retry_limit = 10,
.tmpl_long_retry_limit = 10,
.tx_watchdog_timeout = 5000,
+ .slow_link_thold = 3,
+ .fast_link_thold = 30,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -391,8 +396,10 @@ static struct wlcore_conf wl18xx_conf = {
.scan = {
.min_dwell_time_active = 7500,
.max_dwell_time_active = 30000,
- .min_dwell_time_passive = 100000,
- .max_dwell_time_passive = 100000,
+ .min_dwell_time_active_long = 25000,
+ .max_dwell_time_active_long = 50000,
+ .dwell_time_passive = 100000,
+ .dwell_time_dfs = 150000,
.num_probe_reqs = 2,
.split_scan_timeout = 50000,
},
@@ -489,6 +496,10 @@ static struct wlcore_conf wl18xx_conf = {
.increase_time = 1,
.window_size = 16,
},
+ .recovery = {
+ .bug_on_recovery = 0,
+ .no_recovery = 0,
+ },
};
static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
@@ -501,7 +512,6 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.clock_valid_on_wake_up = 0x00,
.secondary_clock_setting_time = 0x05,
.board_type = BOARD_TYPE_HDK_18XX,
- .rdl = 0x01,
.auto_detect = 0x00,
.dedicated_fem = FEM_NONE,
.low_band_component = COMPONENT_3_WAY_SWITCH,
@@ -517,14 +527,44 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.enable_clpc = 0x00,
.enable_tx_low_pwr_on_siso_rdl = 0x00,
.rx_profile = 0x00,
- .pwr_limit_reference_11_abg = 0xc8,
+ .pwr_limit_reference_11_abg = 0x64,
+ .per_chan_pwr_limit_arr_11abg = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .pwr_limit_reference_11p = 0x64,
+ .per_chan_bo_mode_11_abg = { 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00 },
+ .per_chan_bo_mode_11_p = { 0x00, 0x00, 0x00, 0x00 },
+ .per_chan_pwr_limit_arr_11p = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff },
.psat = 0,
- .low_power_val = 0x00,
- .med_power_val = 0x0a,
- .high_power_val = 0x1e,
+ .low_power_val = 0x08,
+ .med_power_val = 0x12,
+ .high_power_val = 0x18,
+ .low_power_val_2nd = 0x05,
+ .med_power_val_2nd = 0x0a,
+ .high_power_val_2nd = 0x14,
.external_pa_dc2dc = 0,
- .number_of_assembled_ant2_4 = 1,
+ .number_of_assembled_ant2_4 = 2,
.number_of_assembled_ant5 = 1,
+ .tx_rf_margin = 1,
},
};
@@ -595,7 +635,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
};
/* TODO: maybe move to a new header file? */
-#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw.bin"
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-2.bin"
static int wl18xx_identify_chip(struct wl1271 *wl)
{
@@ -608,15 +648,18 @@ static int wl18xx_identify_chip(struct wl1271 *wl)
wl->sr_fw_name = WL18XX_FW_NAME;
/* wl18xx uses the same firmware for PLT */
wl->plt_fw_name = WL18XX_FW_NAME;
- wl->quirks |= WLCORE_QUIRK_NO_ELP |
- WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
+ wl->quirks |= WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN |
- WLCORE_QUIRK_TX_PAD_LAST_FRAME;
-
- wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER, WL18XX_IFTYPE_VER,
- WL18XX_MAJOR_VER, WL18XX_SUBTYPE_VER,
- WL18XX_MINOR_VER);
+ WLCORE_QUIRK_TX_PAD_LAST_FRAME |
+ WLCORE_QUIRK_REGDOMAIN_CONF |
+ WLCORE_QUIRK_DUAL_PROBE_TMPL;
+
+ wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER,
+ WL18XX_IFTYPE_VER, WL18XX_MAJOR_VER,
+ WL18XX_SUBTYPE_VER, WL18XX_MINOR_VER,
+ /* there's no separate multi-role FW */
+ 0, 0, 0, 0);
break;
case CHIP_ID_185x_PG10:
wl1271_warning("chip id 0x%x (185x PG10) is deprecated",
@@ -630,6 +673,11 @@ static int wl18xx_identify_chip(struct wl1271 *wl)
goto out;
}
+ wl->scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
+ wl->scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
+ wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC;
+ wl->sched_scan_templ_id_5 = CMD_TEMPL_PROBE_REQ_5_PERIODIC;
+ wl->max_channels_5 = WL18XX_MAX_CHANNELS_5GHZ;
out:
return ret;
}
@@ -843,6 +891,20 @@ static int wl18xx_boot(struct wl1271 *wl)
if (ret < 0)
goto out;
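+	/* events the driver handles from the wl18xx firmware mailbox */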
+ wl->event_mask = BSS_LOSS_EVENT_ID |
+ SCAN_COMPLETE_EVENT_ID |
+ RSSI_SNR_TRIGGER_0_EVENT_ID |
+ PERIODIC_SCAN_COMPLETE_EVENT_ID |
+ PERIODIC_SCAN_REPORT_EVENT_ID |
+ DUMMY_PACKET_EVENT_ID |
+ PEER_REMOVE_COMPLETE_EVENT_ID |
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID |
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
+ INACTIVE_STA_EVENT_ID |
+ MAX_TX_FAILURE_EVENT_ID |
+ CHANNEL_SWITCH_COMPLETE_EVENT_ID |
+ DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
+
ret = wlcore_boot_run_firmware(wl);
if (ret < 0)
goto out;
@@ -964,7 +1026,7 @@ static int wl18xx_hw_init(struct wl1271 *wl)
/* (re)init private structures. Relevant on recovery as well. */
priv->last_fw_rls_idx = 0;
- priv->extra_spare_vif_count = 0;
+ priv->extra_spare_key_count = 0;
/* set the default amount of spare blocks in the bitmap */
ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_BLOCK_SPARE);
@@ -1022,7 +1084,12 @@ static bool wl18xx_is_mimo_supported(struct wl1271 *wl)
{
struct wl18xx_priv *priv = wl->priv;
- return priv->conf.phy.number_of_assembled_ant2_4 >= 2;
+	/* MIMO is only supported with multiple antennas, and when SISO
+	 * is not forced through the configuration
+ */
+ return (priv->conf.phy.number_of_assembled_ant2_4 >= 2) &&
+ (priv->conf.ht.mode != HT_MODE_WIDE) &&
+ (priv->conf.ht.mode != HT_MODE_SISO20);
}
/*
@@ -1223,8 +1290,8 @@ static int wl18xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
{
struct wl18xx_priv *priv = wl->priv;
- /* If we have VIFs requiring extra spare, indulge them */
- if (priv->extra_spare_vif_count)
+ /* If we have keys requiring extra spare, indulge them */
+ if (priv->extra_spare_key_count)
return WL18XX_TX_HW_EXTRA_BLOCK_SPARE;
return WL18XX_TX_HW_BLOCK_SPARE;
@@ -1236,42 +1303,48 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_key_conf *key_conf)
{
struct wl18xx_priv *priv = wl->priv;
- bool change_spare = false;
+ bool change_spare = false, special_enc;
int ret;
+ wl1271_debug(DEBUG_CRYPT, "extra spare keys before: %d",
+ priv->extra_spare_key_count);
+
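+	/* GEM and TKIP keys require the larger spare TX block allocation */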
+ special_enc = key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
+ key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
+
+ ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
+ if (ret < 0)
+ goto out;
+
/*
- * when adding the first or removing the last GEM/TKIP interface,
+ * when adding the first or removing the last GEM/TKIP key,
* we have to adjust the number of spare blocks.
*/
- change_spare = (key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
- key_conf->cipher == WLAN_CIPHER_SUITE_TKIP) &&
- ((priv->extra_spare_vif_count == 0 && cmd == SET_KEY) ||
- (priv->extra_spare_vif_count == 1 && cmd == DISABLE_KEY));
+ if (special_enc) {
+ if (cmd == SET_KEY) {
+ /* first key */
+ change_spare = (priv->extra_spare_key_count == 0);
+ priv->extra_spare_key_count++;
+ } else if (cmd == DISABLE_KEY) {
+ /* last key */
+ change_spare = (priv->extra_spare_key_count == 1);
+ priv->extra_spare_key_count--;
+ }
+ }
- /* no need to change spare - just regular set_key */
- if (!change_spare)
- return wlcore_set_key(wl, cmd, vif, sta, key_conf);
+ wl1271_debug(DEBUG_CRYPT, "extra spare keys after: %d",
+ priv->extra_spare_key_count);
- ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
- if (ret < 0)
+ if (!change_spare)
goto out;
/* key is now set, change the spare blocks */
- if (cmd == SET_KEY) {
+ if (priv->extra_spare_key_count)
ret = wl18xx_set_host_cfg_bitmap(wl,
WL18XX_TX_HW_EXTRA_BLOCK_SPARE);
- if (ret < 0)
- goto out;
-
- priv->extra_spare_vif_count++;
- } else {
+ else
ret = wl18xx_set_host_cfg_bitmap(wl,
WL18XX_TX_HW_BLOCK_SPARE);
- if (ret < 0)
- goto out;
-
- priv->extra_spare_vif_count--;
- }
out:
return ret;
@@ -1296,6 +1369,92 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
return buf_offset;
}
+static void wl18xx_sta_rc_update(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ bool wide = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update wide %d", wide);
+
+ if (!(changed & IEEE80211_RC_BW_CHANGED))
+ return;
+
+ mutex_lock(&wl->mutex);
+
+ /* sanity */
+ if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
+ goto out;
+
+ /* ignore the change before association */
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ goto out;
+
+ /*
+	 * If we started out as wide, we can change the operation mode. If we
+	 * thought this was a 20MHz AP, we have to reconnect.
+ */
+ if (wlvif->sta.role_chan_type == NL80211_CHAN_HT40MINUS ||
+ wlvif->sta.role_chan_type == NL80211_CHAN_HT40PLUS)
+ wl18xx_acx_peer_ht_operation_mode(wl, wlvif->sta.hlid, wide);
+ else
+ ieee80211_connection_loss(wl12xx_wlvif_to_vif(wlvif));
+
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static int wl18xx_set_peer_cap(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid)
+{
+ return wl18xx_acx_set_peer_cap(wl, ht_cap, allow_ht_operation,
+ rate_set, hlid);
+}
+
+static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ u8 thold;
+ struct wl18xx_fw_status_priv *status_priv =
+ (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
+
+ /* suspended links are never high priority */
+ if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
+ return false;
+
+ /* the priority thresholds are taken from FW */
+ if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map) &&
+ !test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map))
+ thold = status_priv->tx_fast_link_prio_threshold;
+ else
+ thold = status_priv->tx_slow_link_prio_threshold;
+
+ return lnk->allocated_pkts < thold;
+}
+
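+/*
+ * Low-priority traffic keeps flowing until the per-link stop threshold
+ * (suspend/fast/slow, taken from the FW status) is reached.
+ */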
+static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ u8 thold;
+ struct wl18xx_fw_status_priv *status_priv =
+ (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
+
+ if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
+ thold = status_priv->tx_suspend_threshold;
+ else if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map) &&
+ !test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map))
+ thold = status_priv->tx_fast_stop_threshold;
+ else
+ thold = status_priv->tx_slow_stop_threshold;
+
+ return lnk->allocated_pkts < thold;
+}
+
static int wl18xx_setup(struct wl1271 *wl);
static struct wlcore_ops wl18xx_ops = {
@@ -1305,6 +1464,8 @@ static struct wlcore_ops wl18xx_ops = {
.plt_init = wl18xx_plt_init,
.trigger_cmd = wl18xx_trigger_cmd,
.ack_event = wl18xx_ack_event,
+ .wait_for_event = wl18xx_wait_for_event,
+ .process_mailbox_events = wl18xx_process_mailbox_events,
.calc_tx_blocks = wl18xx_calc_tx_blocks,
.set_tx_desc_blocks = wl18xx_set_tx_desc_blocks,
.set_tx_desc_data_len = wl18xx_set_tx_desc_data_len,
@@ -1320,16 +1481,26 @@ static struct wlcore_ops wl18xx_ops = {
.ap_get_mimo_wide_rate_mask = wl18xx_ap_get_mimo_wide_rate_mask,
.get_mac = wl18xx_get_mac,
.debugfs_init = wl18xx_debugfs_add_files,
+ .scan_start = wl18xx_scan_start,
+ .scan_stop = wl18xx_scan_stop,
+ .sched_scan_start = wl18xx_sched_scan_start,
+ .sched_scan_stop = wl18xx_scan_sched_scan_stop,
.handle_static_data = wl18xx_handle_static_data,
.get_spare_blocks = wl18xx_get_spare_blocks,
.set_key = wl18xx_set_key,
+ .channel_switch = wl18xx_cmd_channel_switch,
.pre_pkt_send = wl18xx_pre_pkt_send,
+ .sta_rc_update = wl18xx_sta_rc_update,
+ .set_peer_cap = wl18xx_set_peer_cap,
+ .lnk_high_prio = wl18xx_lnk_high_prio,
+ .lnk_low_prio = wl18xx_lnk_low_prio,
};
/* HT cap appropriate for wide channels in 2Ghz */
static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1343,7 +1514,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
/* HT cap appropriate for wide channels in 5Ghz */
static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1356,7 +1528,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
/* HT cap appropriate for SISO 20 */
static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
- .cap = IEEE80211_HT_CAP_SGI_20,
+ .cap = IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1369,7 +1542,8 @@ static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
/* HT cap appropriate for MIMO rates in 20mhz channel */
static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
- .cap = IEEE80211_HT_CAP_SGI_20,
+ .cap = IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -1387,7 +1561,8 @@ static int wl18xx_setup(struct wl1271 *wl)
wl->rtable = wl18xx_rtable;
wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
- wl->num_rx_desc = WL18XX_NUM_TX_DESCRIPTORS;
+ wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS;
+ wl->num_channels = 2;
wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
@@ -1506,7 +1681,8 @@ static int wl18xx_probe(struct platform_device *pdev)
int ret;
hw = wlcore_alloc_hw(sizeof(struct wl18xx_priv),
- WL18XX_AGGR_BUFFER_SIZE);
+ WL18XX_AGGR_BUFFER_SIZE,
+ sizeof(struct wl18xx_event_mailbox));
if (IS_ERR(hw)) {
wl1271_error("can't allocate hw");
ret = PTR_ERR(hw);
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
new file mode 100644
index 00000000000..09d944505ac
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -0,0 +1,326 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/ieee80211.h>
+#include "scan.h"
+#include "../wlcore/debug.h"
+
+static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd,
+ struct wlcore_scan_channels *cmd_channels)
+{
+ memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive));
+ memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active));
+ cmd->dfs = cmd_channels->dfs;
+ cmd->passive_active = cmd_channels->passive_active;
+
+ memcpy(cmd->channels_2, cmd_channels->channels_2,
+ sizeof(cmd->channels_2));
+	memcpy(cmd->channels_5, cmd_channels->channels_5,
+	       sizeof(cmd->channels_5));
+ /* channels_4 are not supported, so no need to copy them */
+}
+
+static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req)
+{
+ struct wl18xx_cmd_scan_params *cmd;
+ struct wlcore_scan_channels *cmd_channels = NULL;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->role_id = wlvif->role_id;
+
+ if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd->scan_type = SCAN_TYPE_SEARCH;
+ cmd->rssi_threshold = -127;
+ cmd->snr_threshold = 0;
+
+ cmd->bss_type = SCAN_BSS_TYPE_ANY;
+
+ cmd->ssid_from_list = 0;
+ cmd->filter = 0;
+ cmd->add_broadcast = 0;
+
+ cmd->urgency = 0;
+ cmd->protect = 0;
+
+ cmd->n_probe_reqs = wl->conf.scan.num_probe_reqs;
+ cmd->terminate_after = 0;
+
+ /* configure channels */
+ WARN_ON(req->n_ssids > 1);
+
+ cmd_channels = kzalloc(sizeof(*cmd_channels), GFP_KERNEL);
+ if (!cmd_channels) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wlcore_set_scan_chan_params(wl, cmd_channels, req->channels,
+ req->n_channels, req->n_ssids,
+ SCAN_TYPE_SEARCH);
+ wl18xx_adjust_channels(cmd, cmd_channels);
+
+	/*
+	 * all the cycle params (except total_cycles) should
+	 * remain 0 for a normal scan
+	 */
+ cmd->total_cycles = 1;
+
+ if (req->no_cck)
+ cmd->rate = WL18XX_SCAN_RATE_6;
+
+ cmd->tag = WL1271_SCAN_DEFAULT_TAG;
+
+ if (req->n_ssids) {
+ cmd->ssid_len = req->ssids[0].ssid_len;
+ memcpy(cmd->ssid, req->ssids[0].ssid, cmd->ssid_len);
+ }
+
+ /* TODO: per-band ies? */
+ if (cmd->active[0]) {
+ u8 band = IEEE80211_BAND_2GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->role_id, band,
+ req->ssids ? req->ssids[0].ssid : NULL,
+ req->ssids ? req->ssids[0].ssid_len : 0,
+ req->ie,
+ req->ie_len,
+ false);
+ if (ret < 0) {
+ wl1271_error("2.4GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ if (cmd->active[1] || cmd->dfs) {
+ u8 band = IEEE80211_BAND_5GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->role_id, band,
+ req->ssids ? req->ssids[0].ssid : NULL,
+ req->ssids ? req->ssids[0].ssid_len : 0,
+ req->ie,
+ req->ie_len,
+ false);
+ if (ret < 0) {
+ wl1271_error("5GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("SCAN failed");
+ goto out;
+ }
+
+out:
+ kfree(cmd_channels);
+ kfree(cmd);
+ return ret;
+}
+
+void wl18xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ wl->scan.failed = false;
+ cancel_delayed_work(&wl->scan_complete_work);
+ ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+ msecs_to_jiffies(0));
+}
+
+static
+int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ struct wl18xx_cmd_scan_params *cmd;
+ struct wlcore_scan_channels *cmd_channels = NULL;
+ struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
+ int ret;
+ int filter_type;
+
+ wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
+
+ filter_type = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
+ if (filter_type < 0)
+ return filter_type;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->role_id = wlvif->role_id;
+
+ if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd->scan_type = SCAN_TYPE_PERIODIC;
+ cmd->rssi_threshold = c->rssi_threshold;
+ cmd->snr_threshold = c->snr_threshold;
+
+ /* don't filter on BSS type */
+ cmd->bss_type = SCAN_BSS_TYPE_ANY;
+
+ cmd->ssid_from_list = 1;
+ if (filter_type == SCAN_SSID_FILTER_LIST)
+ cmd->filter = 1;
+ cmd->add_broadcast = 0;
+
+ cmd->urgency = 0;
+ cmd->protect = 0;
+
+ cmd->n_probe_reqs = c->num_probe_reqs;
+ /* don't stop scanning automatically when something is found */
+ cmd->terminate_after = 0;
+
+ cmd_channels = kzalloc(sizeof(*cmd_channels), GFP_KERNEL);
+ if (!cmd_channels) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* configure channels */
+ wlcore_set_scan_chan_params(wl, cmd_channels, req->channels,
+ req->n_channels, req->n_ssids,
+ SCAN_TYPE_PERIODIC);
+ wl18xx_adjust_channels(cmd, cmd_channels);
+
+ cmd->short_cycles_sec = 0;
+ cmd->long_cycles_sec = cpu_to_le16(req->interval);
+ cmd->short_cycles_count = 0;
+
+ cmd->total_cycles = 0;
+
+ cmd->tag = WL1271_SCAN_DEFAULT_TAG;
+
+ /* create a PERIODIC_SCAN_REPORT_EVENT whenever we've got a match */
+ cmd->report_threshold = 1;
+ cmd->terminate_on_report = 0;
+
+ if (cmd->active[0]) {
+ u8 band = IEEE80211_BAND_2GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->role_id, band,
+ req->ssids ? req->ssids[0].ssid : NULL,
+ req->ssids ? req->ssids[0].ssid_len : 0,
+ ies->ie[band],
+ ies->len[band],
+ true);
+ if (ret < 0) {
+ wl1271_error("2.4GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ if (cmd->active[1] || cmd->dfs) {
+ u8 band = IEEE80211_BAND_5GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->role_id, band,
+ req->ssids ? req->ssids[0].ssid : NULL,
+ req->ssids ? req->ssids[0].ssid_len : 0,
+ ies->ie[band],
+ ies->len[band],
+ true);
+ if (ret < 0) {
+ wl1271_error("5GHz PROBE request template failed");
+ goto out;
+ }
+ }
+
+ wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("SCAN failed");
+ goto out;
+ }
+
+out:
+ kfree(cmd_channels);
+ kfree(cmd);
+ return ret;
+}
+
+int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ return wl18xx_scan_sched_scan_config(wl, wlvif, req, ies);
+}
+
+static int __wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 scan_type)
+{
+ struct wl18xx_cmd_scan_stop *stop;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
+
+ stop = kzalloc(sizeof(*stop), GFP_KERNEL);
+ if (!stop) {
+ wl1271_error("failed to alloc memory to send sched scan stop");
+ return -ENOMEM;
+ }
+
+ stop->role_id = wlvif->role_id;
+ stop->scan_type = scan_type;
+
+ ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, stop, sizeof(*stop), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send sched scan stop command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(stop);
+ return ret;
+}
+
+void wl18xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ __wl18xx_scan_stop(wl, wlvif, SCAN_TYPE_PERIODIC);
+}
+int wl18xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req)
+{
+ return wl18xx_scan_send(wl, wlvif, req);
+}
+
+int wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ return __wl18xx_scan_stop(wl, wlvif, SCAN_TYPE_SEARCH);
+}
diff --git a/drivers/net/wireless/ti/wl18xx/scan.h b/drivers/net/wireless/ti/wl18xx/scan.h
new file mode 100644
index 00000000000..eadee42689d
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/scan.h
@@ -0,0 +1,127 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_SCAN_H__
+#define __WL18XX_SCAN_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/scan.h"
+
+struct tracking_ch_params {
+ struct conn_scan_ch_params channel;
+
+ __le32 bssid_lsb;
+ __le16 bssid_msb;
+
+ u8 padding[2];
+} __packed;
+
+/* probe request rate */
+enum
+{
+ WL18XX_SCAN_RATE_1 = 0,
+ WL18XX_SCAN_RATE_5_5 = 1,
+ WL18XX_SCAN_RATE_6 = 2,
+};
+
+#define WL18XX_MAX_CHANNELS_5GHZ 32
+
+struct wl18xx_cmd_scan_params {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 scan_type;
+
+ s8 rssi_threshold; /* for filtering (in dBm) */
+ s8 snr_threshold; /* for filtering (in dB) */
+
+ u8 bss_type; /* for filtering */
+ u8 ssid_from_list; /* use ssid from configured ssid list */
+ u8 filter; /* forward only results with matching ssids */
+
+ /*
+ * add broadcast ssid in addition to the configured ssids.
+	 * the driver should add a dummy entry for it (?).
+ */
+ u8 add_broadcast;
+
+ u8 urgency;
+ u8 protect; /* ??? */
+	u8 n_probe_reqs;		/* Number of probe requests per channel */
+ u8 terminate_after; /* early terminate scan operation */
+
+ u8 passive[SCAN_MAX_BANDS]; /* number of passive scan channels */
+ u8 active[SCAN_MAX_BANDS]; /* number of active scan channels */
+ u8 dfs; /* number of dfs channels in 5ghz */
+	u8 passive_active;	/* number of passive-before-active channels in 2.4GHz */
+
+ __le16 short_cycles_sec;
+ __le16 long_cycles_sec;
+ u8 short_cycles_count;
+ u8 total_cycles; /* 0 - infinite */
+ u8 padding[2];
+
+ union {
+ struct {
+ struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
+ struct conn_scan_ch_params channels_5[WL18XX_MAX_CHANNELS_5GHZ];
+ struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
+ };
+ struct tracking_ch_params channels_tracking[WL1271_SCAN_MAX_CHANNELS];
+	};
+
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */
+ u8 tag;
+ u8 rate;
+
+	/* send SCAN_REPORT_EVENT in periodic scans after each cycle
+	 * if the number of results >= report_threshold. Must be 0 for
+	 * non-periodic scans.
+	 */
+ u8 report_threshold;
+
+	/* Whether the periodic scan should stop after a report event is
+	 * created. Must be 0 for non-periodic scans.
+	 */
+ u8 terminate_on_report;
+
+ u8 padding1[3];
+} __packed;
+
+struct wl18xx_cmd_scan_stop {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 scan_type;
+ u8 padding[2];
+} __packed;
+
+int wl18xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req);
+int wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl18xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+void wl18xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+#endif
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 5b1fb10d9fd..57c69439664 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -28,6 +28,49 @@
#include "wl18xx.h"
#include "tx.h"
+static
+void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
+ struct ieee80211_tx_rate *rate)
+{
+ u8 fw_rate = wl->fw_status_2->counters.tx_last_rate;
+
+ if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
+ wl1271_error("last Tx rate invalid: %d", fw_rate);
+ rate->idx = 0;
+ rate->flags = 0;
+ return;
+ }
+
+ if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
+ rate->idx = fw_rate;
+ rate->flags = 0;
+ } else {
+ rate->flags = IEEE80211_TX_RC_MCS;
+ rate->idx = fw_rate - CONF_HW_RATE_INDEX_MCS0;
+
+ /* SGI modifier is counted as a separate rate */
+ if (fw_rate >= CONF_HW_RATE_INDEX_MCS7_SGI)
+ (rate->idx)--;
+ if (fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI)
+ (rate->idx)--;
+
+		/* this also covers the 40MHz SGI case (= MCS15) */
+ if (fw_rate == CONF_HW_RATE_INDEX_MCS7_SGI ||
+ fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI)
+ rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ if (fw_rate > CONF_HW_RATE_INDEX_MCS7_SGI && vif) {
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
+ wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
+ /* adjustment needed for range 0-7 */
+ rate->idx -= 8;
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ }
+ }
+ }
+}
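+
+/*
+ * For illustration, a trace of the mapping above using the
+ * CONF_HW_RATE_INDEX_* values from conf.h (see the conf.h hunk below):
+ *
+ * fw_rate = CONF_HW_RATE_INDEX_MCS7_SGI (20): idx = 20 - 12 = 8, then
+ * one is subtracted for the SGI modifier, giving MCS7 with
+ * IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_SHORT_GI.
+ *
+ * fw_rate = CONF_HW_RATE_INDEX_MCS15_SGI (29): on a 20MHz vif this
+ * yields MCS15 + SGI (two subtractions); on an HT40 vif the same
+ * index is read as MCS7 40MHz SGI, so idx drops by 8 and
+ * IEEE80211_TX_RC_40_MHZ_WIDTH is set as well.
+ */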
+
static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
{
struct ieee80211_tx_info *info;
@@ -44,7 +87,6 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
/* a zero bit indicates Tx success */
tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX));
-
skb = wl->tx_frames[id];
info = IEEE80211_SKB_CB(skb);
@@ -56,11 +98,13 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
/* update the TX status info */
if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
+	/*
+	 * info->control and info->status share a union, so grab
+	 * info->control.vif while it is still valid, and only then fill
+	 * out the info->status fields
+	 */
+ wl18xx_get_last_tx_rate(wl, info->control.vif, &info->status.rates[0]);
- /* no real data about Tx completion */
- info->status.rates[0].idx = -1;
- info->status.rates[0].count = 0;
- info->status.rates[0].flags = 0;
+ info->status.rates[0].count = 1; /* no data about retries */
info->status.ack_signal = -1;
if (!tx_success)
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 96a1e438d67..b6739e79efc 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -26,10 +26,10 @@
/* minimum FW required for driver */
#define WL18XX_CHIP_VER 8
-#define WL18XX_IFTYPE_VER 2
-#define WL18XX_MAJOR_VER 0
-#define WL18XX_SUBTYPE_VER 0
-#define WL18XX_MINOR_VER 100
+#define WL18XX_IFTYPE_VER 5
+#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
+#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
+#define WL18XX_MINOR_VER 28
#define WL18XX_CMD_MAX_SIZE 740
@@ -49,8 +49,8 @@ struct wl18xx_priv {
/* Index of last released Tx desc in FW */
u8 last_fw_rls_idx;
- /* number of VIFs requiring extra spare mem-blocks */
- int extra_spare_vif_count;
+ /* number of keys requiring extra spare mem-blocks */
+ int extra_spare_key_count;
};
#define WL18XX_FW_MAX_TX_STATUS_DESC 33
@@ -68,7 +68,43 @@ struct wl18xx_fw_status_priv {
*/
u8 released_tx_desc[WL18XX_FW_MAX_TX_STATUS_DESC];
- u8 padding[2];
+ /* A bitmap representing the currently suspended links. The suspend
+	 * is short-lived, for multi-channel Tx requirements.
+ */
+ __le32 link_suspend_bitmap;
+
+	/* packet threshold for an "almost empty" AC,
+	 * for Tx scheduling purposes
+	 */
+ u8 tx_ac_threshold;
+
+ /* number of packets to queue up for a link in PS */
+ u8 tx_ps_threshold;
+
+	/* number of packets to queue up for a suspended link */
+ u8 tx_suspend_threshold;
+
+	/* A slow link qualifies as a high-priority link only while it has
+	 * fewer than this number of packets in its queue.
+	 */
+	u8 tx_slow_link_prio_threshold;
+
+	/* A fast link qualifies as a high-priority link only while it has
+	 * fewer than this number of packets in its queue.
+	 */
+	u8 tx_fast_link_prio_threshold;
+
+	/* Once a slow link has this many packets in its queue, we stop
+	 * queuing up packets for it.
+	 */
+	u8 tx_slow_stop_threshold;
+
+	/* Once a fast link has this many packets in its queue, we stop
+	 * queuing up packets for it.
+	 */
+	u8 tx_fast_stop_threshold;
+
+ u8 padding[3];
};
#define WL18XX_PHY_VERSION_MAX_LEN 20
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
index d7b907e6717..2b832825c3d 100644
--- a/drivers/net/wireless/ti/wlcore/Kconfig
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -33,8 +33,3 @@ config WLCORE_SDIO
If you choose to build a module, it'll be called wlcore_sdio.
Say N if unsure.
-
-config WL12XX_PLATFORM_DATA
- bool
- depends on WLCORE_SDIO != n || WL1251_SDIO != n
- default y
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
index d9fba9e3213..b21398f6c3e 100644
--- a/drivers/net/wireless/ti/wlcore/Makefile
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -9,7 +9,4 @@ obj-$(CONFIG_WLCORE) += wlcore.o
obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o
obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o
-# small builtin driver bit
-obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
-
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index ce108a736bd..c7965432339 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1340,6 +1340,8 @@ out:
kfree(acx);
return ret;
}
+EXPORT_SYMBOL_GPL(wl1271_acx_set_ht_capabilities);
+
int wl1271_acx_set_ht_information(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
@@ -1433,13 +1435,22 @@ int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
acx->win_size = wl->conf.ht.rx_ba_win_size;
acx->ssn = ssn;
- ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
- sizeof(*acx));
+ ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx,
+ sizeof(*acx),
+ BIT(CMD_STATUS_NO_RX_BA_SESSION));
if (ret < 0) {
wl1271_warning("acx ba receiver session failed: %d", ret);
goto out;
}
+ /* sometimes we can't start the session */
+ if (ret == CMD_STATUS_NO_RX_BA_SESSION) {
+ wl1271_warning("no fw rx ba on tid %d", tid_index);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = 0;
out:
kfree(acx);
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index d03215d6b3b..126536c6a39 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1025,7 +1025,6 @@ enum {
ACX_CONFIG_HANGOVER = 0x0042,
ACX_FEATURE_CFG = 0x0043,
ACX_PROTECTION_CFG = 0x0044,
- ACX_CHECKSUM_CONFIG = 0x0045,
};
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index 375ea574eaf..77752b03f18 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -84,47 +84,57 @@ out:
static int wlcore_validate_fw_ver(struct wl1271 *wl)
{
unsigned int *fw_ver = wl->chip.fw_ver;
- unsigned int *min_ver = wl->min_fw_ver;
+ unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
+ wl->min_mr_fw_ver : wl->min_sr_fw_ver;
+ char min_fw_str[32] = "";
+ int i;
/* the chip must be exactly equal */
- if (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP])
+ if ((min_ver[FW_VER_CHIP] != WLCORE_FW_VER_IGNORE) &&
+ (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP]))
goto fail;
- /* always check the next digit if all previous ones are equal */
-
- if (min_ver[FW_VER_IF_TYPE] < fw_ver[FW_VER_IF_TYPE])
- goto out;
- else if (min_ver[FW_VER_IF_TYPE] > fw_ver[FW_VER_IF_TYPE])
+ /* the firmware type must be equal */
+ if ((min_ver[FW_VER_IF_TYPE] != WLCORE_FW_VER_IGNORE) &&
+ (min_ver[FW_VER_IF_TYPE] != fw_ver[FW_VER_IF_TYPE]))
goto fail;
- if (min_ver[FW_VER_MAJOR] < fw_ver[FW_VER_MAJOR])
- goto out;
- else if (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR])
+ /* the project number must be equal */
+ if ((min_ver[FW_VER_SUBTYPE] != WLCORE_FW_VER_IGNORE) &&
+ (min_ver[FW_VER_SUBTYPE] != fw_ver[FW_VER_SUBTYPE]))
goto fail;
- if (min_ver[FW_VER_SUBTYPE] < fw_ver[FW_VER_SUBTYPE])
- goto out;
- else if (min_ver[FW_VER_SUBTYPE] > fw_ver[FW_VER_SUBTYPE])
+ /* the API version must be greater or equal */
+ if ((min_ver[FW_VER_MAJOR] != WLCORE_FW_VER_IGNORE) &&
+ (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR]))
goto fail;
- if (min_ver[FW_VER_MINOR] < fw_ver[FW_VER_MINOR])
- goto out;
- else if (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR])
+ /* if the API version is equal... */
+ if (((min_ver[FW_VER_MAJOR] == WLCORE_FW_VER_IGNORE) ||
+ (min_ver[FW_VER_MAJOR] == fw_ver[FW_VER_MAJOR])) &&
+ /* ...the minor must be greater or equal */
+ ((min_ver[FW_VER_MINOR] != WLCORE_FW_VER_IGNORE) &&
+ (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR])))
goto fail;
-out:
return 0;
fail:
- wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is outdated.\n"
- "Please use at least FW %u.%u.%u.%u.%u.\n"
- "You can get more information at:\n"
- "http://wireless.kernel.org/en/users/Drivers/wl12xx",
+	for (i = 0; i < NUM_FW_VER; i++) {
+		size_t len = strlen(min_fw_str);
+
+		/* append at the current offset; passing min_fw_str as its
+		 * own source argument would be undefined behaviour
+		 */
+		if (min_ver[i] == WLCORE_FW_VER_IGNORE)
+			snprintf(min_fw_str + len, sizeof(min_fw_str) - len,
+				 "*.");
+		else
+			snprintf(min_fw_str + len, sizeof(min_fw_str) - len,
+				 "%u.", min_ver[i]);
+	}
+
+ wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
+ "Please use at least FW %s\n"
+ "You can get the latest firmwares at:\n"
+ "git://github.com/TI-OpenLink/firmwares.git",
fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE],
fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE],
- fw_ver[FW_VER_MINOR], min_ver[FW_VER_CHIP],
- min_ver[FW_VER_IF_TYPE], min_ver[FW_VER_MAJOR],
- min_ver[FW_VER_SUBTYPE], min_ver[FW_VER_MINOR]);
+ fw_ver[FW_VER_MINOR], min_fw_str);
return -EINVAL;
}
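+
+/*
+ * Example of the policy above with the wl18xx minimum of 8.5.*.*.28
+ * (WL18XX_CHIP_VER.WL18XX_IFTYPE_VER.*.*.WL18XX_MINOR_VER, shown in
+ * the wl18xx.h hunk above): firmware 8.5.0.0.30 is accepted, while
+ * 8.5.0.0.20 is rejected on the minor check and 8.4.0.0.100 on the
+ * IF_TYPE check.
+ */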
@@ -491,7 +501,7 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
if (ret < 0)
return ret;
- wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
+ wl->mbox_ptr[1] = wl->mbox_ptr[0] + wl->mbox_size;
wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
wl->mbox_ptr[0], wl->mbox_ptr[1]);
@@ -508,23 +518,6 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
*/
/* unmask required mbox events */
- wl->event_mask = BSS_LOSE_EVENT_ID |
- REGAINED_BSS_EVENT_ID |
- SCAN_COMPLETE_EVENT_ID |
- ROLE_STOP_COMPLETE_EVENT_ID |
- RSSI_SNR_TRIGGER_0_EVENT_ID |
- PSPOLL_DELIVERY_FAILURE_EVENT_ID |
- SOFT_GEMINI_SENSE_EVENT_ID |
- PERIODIC_SCAN_REPORT_EVENT_ID |
- PERIODIC_SCAN_COMPLETE_EVENT_ID |
- DUMMY_PACKET_EVENT_ID |
- PEER_REMOVE_COMPLETE_EVENT_ID |
- BA_SESSION_RX_CONSTRAINT_EVENT_ID |
- REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
- INACTIVE_STA_EVENT_ID |
- MAX_TX_RETRY_EVENT_ID |
- CHANNEL_SWITCH_COMPLETE_EVENT_ID;
-
ret = wl1271_event_unmask(wl);
if (ret < 0) {
wl1271_error("EVENT mask setting failed");
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 27f83f72a93..6331f9e1cb3 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -48,14 +48,15 @@
* @id: command id
* @buf: buffer containing the command, must work with dma
* @len: length of the buffer
+ * return the cmd status code on success.
*/
-int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
- size_t res_len)
+static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
+ size_t len, size_t res_len)
{
struct wl1271_cmd_header *cmd;
unsigned long timeout;
u32 intr;
- int ret = 0;
+ int ret;
u16 status;
u16 poll_count = 0;
@@ -71,7 +72,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
ret = wlcore_write(wl, wl->cmd_box_addr, buf, len, false);
if (ret < 0)
- goto fail;
+ return ret;
/*
* TODO: we just need this because one bit is in a different
@@ -79,19 +80,18 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
*/
ret = wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
if (ret < 0)
- goto fail;
+ return ret;
timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
if (ret < 0)
- goto fail;
+ return ret;
while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
if (time_after(jiffies, timeout)) {
wl1271_error("command complete timeout");
- ret = -ETIMEDOUT;
- goto fail;
+ return -ETIMEDOUT;
}
poll_count++;
@@ -102,7 +102,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
if (ret < 0)
- goto fail;
+ return ret;
}
/* read back the status code of the command */
@@ -111,33 +111,66 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
ret = wlcore_read(wl, wl->cmd_box_addr, cmd, res_len, false);
if (ret < 0)
- goto fail;
+ return ret;
status = le16_to_cpu(cmd->status);
- if (status != CMD_STATUS_SUCCESS) {
- wl1271_error("command execute failure %d", status);
- ret = -EIO;
- goto fail;
- }
ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
WL1271_ACX_INTR_CMD_COMPLETE);
if (ret < 0)
+ return ret;
+
+ return status;
+}
+
+/*
+ * send command to fw and return cmd status on success
+ * valid_rets contains a bitmap of allowed error codes
+ */
+int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len,
+ size_t res_len, unsigned long valid_rets)
+{
+ int ret = __wlcore_cmd_send(wl, id, buf, len, res_len);
+
+ if (ret < 0)
goto fail;
- return 0;
+ /* success is always a valid status */
+ valid_rets |= BIT(CMD_STATUS_SUCCESS);
+ if (ret >= MAX_COMMAND_STATUS ||
+ !test_bit(ret, &valid_rets)) {
+ wl1271_error("command execute failure %d", ret);
+ ret = -EIO;
+ goto fail;
+ }
+ return ret;
fail:
wl12xx_queue_recovery_work(wl);
return ret;
}
+EXPORT_SYMBOL_GPL(wl1271_cmd_send);
+
+/*
+ * wrapper for wlcore_cmd_send that accepts only CMD_STATUS_SUCCESS.
+ * Returns 0 on success.
+ */
+int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
+ size_t res_len)
+{
+ int ret = wlcore_cmd_send_failsafe(wl, id, buf, len, res_len, 0);
+
+ if (ret < 0)
+ return ret;
+ return 0;
+}
/*
* Poll the mailbox event field until any of the bits in the mask is set or a
* timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
*/
-static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
- u32 mask, bool *timeout)
+int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
+ u32 mask, bool *timeout)
{
u32 *events_vector;
u32 event;
@@ -187,20 +220,7 @@ out:
kfree(events_vector);
return ret;
}
-
-static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
-{
- int ret;
- bool timeout = false;
-
- ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask, &timeout);
- if (ret != 0 || timeout) {
- wl12xx_queue_recovery_work(wl);
- return ret;
- }
-
- return 0;
-}
+EXPORT_SYMBOL_GPL(wlcore_cmd_wait_for_event_or_timeout);
int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
u8 *role_id)
@@ -278,6 +298,16 @@ out:
return ret;
}
+static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid)
+{
+ if (wl->session_ids[hlid] >= SESSION_COUNTER_MAX)
+ wl->session_ids[hlid] = 0;
+
+ wl->session_ids[hlid]++;
+
+ return wl->session_ids[hlid];
+}
+
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
unsigned long flags;
@@ -285,12 +315,21 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
if (link >= WL12XX_MAX_LINKS)
return -EBUSY;
+ wl->session_ids[link] = wlcore_get_new_session_id(wl, link);
+
/* these bits are used by op_tx */
spin_lock_irqsave(&wl->wl_lock, flags);
__set_bit(link, wl->links_map);
__set_bit(link, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ /* take the last "freed packets" value from the current FW status */
+ wl->links[link].prev_freed_pkts =
+ wl->fw_status_2->counters.tx_lnk_free_pkts[link];
+ wl->links[link].wlvif = wlvif;
*hlid = link;
+
+ wl->active_link_count++;
return 0;
}
@@ -307,24 +346,21 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
__clear_bit(*hlid, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
+ wl->links[*hlid].allocated_pkts = 0;
+ wl->links[*hlid].prev_freed_pkts = 0;
+ wl->links[*hlid].ba_bitmap = 0;
+ memset(wl->links[*hlid].addr, 0, ETH_ALEN);
+
/*
* At this point op_tx() will not add more packets to the queues. We
* can purge them.
*/
wl1271_tx_reset_link_queues(wl, *hlid);
+ wl->links[*hlid].wlvif = NULL;
*hlid = WL12XX_INVALID_LINK_ID;
-}
-
-static int wl12xx_get_new_session_id(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
-{
- if (wlvif->session_counter >= SESSION_COUNTER_MAX)
- wlvif->session_counter = 0;
-
- wlvif->session_counter++;
-
- return wlvif->session_counter;
+ wl->active_link_count--;
+ WARN_ON_ONCE(wl->active_link_count < 0);
}
static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
@@ -345,7 +381,9 @@ static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
}
static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+ struct wl12xx_vif *wlvif,
+ enum ieee80211_band band,
+ int channel)
{
struct wl12xx_cmd_role_start *cmd;
int ret;
@@ -359,9 +397,9 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
cmd->role_id = wlvif->dev_role_id;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (band == IEEE80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
- cmd->channel = wlvif->channel;
+ cmd->channel = channel;
if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
@@ -369,7 +407,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
goto out_free;
}
cmd->device.hlid = wlvif->dev_hlid;
- cmd->device.session = wl12xx_get_new_session_id(wl, wlvif);
+ cmd->device.session = wl->session_ids[wlvif->dev_hlid];
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -420,12 +458,6 @@ static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
goto out_free;
}
- ret = wl1271_cmd_wait_for_event(wl, ROLE_STOP_COMPLETE_EVENT_ID);
- if (ret < 0) {
- wl1271_error("cmd role stop dev event completion error");
- goto out_free;
- }
-
wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
@@ -439,6 +471,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
+ u32 supported_rates;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -459,7 +492,14 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
cmd->sta.ssid_len = wlvif->ssid_len;
memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
- cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
+
+ supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
+ wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
+ if (wlvif->p2p)
+ supported_rates &= ~CONF_TX_CCK_RATES;
+
+ cmd->sta.local_rates = cpu_to_le32(supported_rates);
+
cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
@@ -468,8 +508,14 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
goto out_free;
}
cmd->sta.hlid = wlvif->sta.hlid;
- cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
- cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
+ cmd->sta.session = wl->session_ids[wlvif->sta.hlid];
+ /*
+ * We don't have the correct remote rates in this stage. The
+ * rates will be reconfigured later, after association, if the
+	 * We don't have the correct remote rates at this stage. The
+ * we can do, so use all supported_rates here.
+ */
+ cmd->sta.remote_rates = cpu_to_le32(supported_rates);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
@@ -482,6 +528,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
goto err_hlid;
}
+ wlvif->sta.role_chan_type = wlvif->channel_type;
goto out_free;
err_hlid:
@@ -500,7 +547,6 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
- bool timeout = false;
if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
@@ -523,17 +569,6 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
goto out_free;
}
- /*
- * Sometimes the firmware doesn't send this event, so we just
- * time out without failing. Queue recovery for other
- * failures.
- */
- ret = wl1271_cmd_wait_for_event_or_timeout(wl,
- ROLE_STOP_COMPLETE_EVENT_ID,
- &timeout);
- if (ret)
- wl12xx_queue_recovery_work(wl);
-
wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
@@ -579,12 +614,15 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
cmd->ap.global_hlid = wlvif->ap.global_hlid;
cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
+ cmd->ap.global_session_id = wl->session_ids[wlvif->ap.global_hlid];
+ cmd->ap.bcast_session_id = wl->session_ids[wlvif->ap.bcast_hlid];
cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ap.dtim_interval = bss_conf->dtim_period;
cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
/* FIXME: Change when adding DFS */
cmd->ap.reset_tsf = 1; /* By default reset AP TSF */
+ cmd->ap.wmm = wlvif->wmm_enabled;
cmd->channel = wlvif->channel;
cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
@@ -599,8 +637,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len);
}
- supported_rates = CONF_TX_AP_ENABLED_RATES | CONF_TX_MCS_RATES |
+ supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
+ if (wlvif->p2p)
+ supported_rates &= ~CONF_TX_CCK_RATES;
wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x",
supported_rates);
@@ -799,8 +839,11 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
* @id: acx id
* @buf: buffer containing acx, including all headers, must work with dma
* @len: length of buf
+ * @valid_rets: bitmap of valid cmd status codes (i.e. return values).
+ * return the cmd status on success.
*/
-int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
+int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
+ size_t len, unsigned long valid_rets)
{
struct acx_header *acx = buf;
int ret;
@@ -812,12 +855,26 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
/* payload length, does not include any headers */
acx->len = cpu_to_le16(len - sizeof(*acx));
- ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len, 0);
+ ret = wlcore_cmd_send_failsafe(wl, CMD_CONFIGURE, acx, len, 0,
+ valid_rets);
if (ret < 0) {
wl1271_warning("CONFIGURE command NOK");
return ret;
}
+ return ret;
+}
+
+/*
+ * wrapper for wlcore_cmd_configure that accepts only success status.
+ * Returns 0 on success.
+ */
+int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
+{
+ int ret = wlcore_cmd_configure_failsafe(wl, id, buf, len, 0);
+
+ if (ret < 0)
+ return ret;
return 0;
}
EXPORT_SYMBOL_GPL(wl1271_cmd_configure);
@@ -1034,8 +1091,8 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb;
int ret;
u32 rate;
- u16 template_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
- u16 template_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
+ u16 template_id_2_4 = wl->scan_templ_id_2_4;
+ u16 template_id_5 = wl->scan_templ_id_5;
skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie_len);
@@ -1048,10 +1105,10 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
- if (!sched_scan &&
+ if (sched_scan &&
(wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
- template_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4;
- template_id_5 = CMD_TEMPL_APP_PROBE_REQ_5;
+ template_id_2_4 = wl->sched_scan_templ_id_2_4;
+ template_id_5 = wl->sched_scan_templ_id_5;
}
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
@@ -1068,6 +1125,7 @@ out:
dev_kfree_skb(skb);
return ret;
}
+EXPORT_SYMBOL_GPL(wl12xx_cmd_build_probe_req);
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
@@ -1379,7 +1437,8 @@ out:
return ret;
}
-int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid)
+int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid)
{
struct wl12xx_cmd_set_peer_state *cmd;
int ret = 0;
@@ -1395,6 +1454,10 @@ int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid)
cmd->hlid = hlid;
cmd->state = WL1271_CMD_STA_STATE_CONNECTED;
+ /* wmm param is valid only for station role */
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ cmd->wmm = wlvif->wmm_enabled;
+
ret = wl1271_cmd_send(wl, CMD_SET_PEER_STATE, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send set peer state command");
@@ -1429,6 +1492,7 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cmd->hlid = hlid;
cmd->sp_len = sta->max_sp;
cmd->wmm = sta->wme ? 1 : 0;
+ cmd->session_id = wl->session_ids[hlid];
for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
if (sta->wme && (sta->uapsd_queues & BIT(i)))
@@ -1490,9 +1554,10 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
goto out_free;
}
- ret = wl1271_cmd_wait_for_event_or_timeout(wl,
- PEER_REMOVE_COMPLETE_EVENT_ID,
- &timeout);
+ ret = wl->ops->wait_for_event(wl,
+ WLCORE_EVENT_PEER_REMOVE_COMPLETE,
+ &timeout);
+
/*
* We are ok with a timeout here. The event is sometimes not sent
* due to a firmware bug. In case of another error (like SDIO timeout)
@@ -1508,6 +1573,131 @@ out:
return ret;
}
+static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
+{
+ int idx = -1;
+
+ switch (band) {
+ case IEEE80211_BAND_5GHZ:
+ if (ch >= 8 && ch <= 16)
+ idx = ((ch-8)/4 + 18);
+ else if (ch >= 34 && ch <= 64)
+ idx = ((ch-34)/2 + 3 + 18);
+ else if (ch >= 100 && ch <= 140)
+ idx = ((ch-100)/4 + 15 + 18);
+ else if (ch >= 149 && ch <= 165)
+ idx = ((ch-149)/4 + 26 + 18);
+ else
+ idx = -1;
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (ch >= 1 && ch <= 14)
+ idx = ch - 1;
+ else
+ idx = -1;
+ break;
+ default:
+ wl1271_error("get reg conf ch idx - unknown band: %d",
+ (int)band);
+ }
+
+ return idx;
+}
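+
+/*
+ * A few examples of the mapping above (bit positions in the
+ * reg-domain channel bitmap written to ch_bit_map1/ch_bit_map2):
+ * 2.4GHz channel 6 -> bit 5; 5GHz channel 36 -> (36-34)/2 + 3 + 18 =
+ * bit 22; channel 100 -> 15 + 18 = bit 33; channel 149 -> 26 + 18 =
+ * bit 44.
+ */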
+
+void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
+ enum ieee80211_band band)
+{
+ int ch_bit_idx = 0;
+
+ if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
+ return;
+
+ ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
+
+	if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
+ set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
+}
+
+int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL;
+ int ret = 0, i, b, ch_bit_idx;
+ struct ieee80211_channel *channel;
+ u32 tmp_ch_bitmap[2];
+ u16 ch;
+ struct wiphy *wiphy = wl->hw->wiphy;
+ struct ieee80211_supported_band *band;
+ bool timeout = false;
+
+ if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
+ return 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd reg domain config");
+
+ memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap));
+
+ for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) {
+ band = wiphy->bands[b];
+ for (i = 0; i < band->n_channels; i++) {
+ channel = &band->channels[i];
+ ch = channel->hw_value;
+
+ if (channel->flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_PASSIVE_SCAN))
+ continue;
+
+ ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch);
+ if (ch_bit_idx < 0)
+ continue;
+
+ set_bit(ch_bit_idx, (long *)tmp_ch_bitmap);
+ }
+ }
+
+ tmp_ch_bitmap[0] |= wl->reg_ch_conf_pending[0];
+ tmp_ch_bitmap[1] |= wl->reg_ch_conf_pending[1];
+
+ if (!memcmp(tmp_ch_bitmap, wl->reg_ch_conf_last, sizeof(tmp_ch_bitmap)))
+ goto out;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->ch_bit_map1 = cpu_to_le32(tmp_ch_bitmap[0]);
+ cmd->ch_bit_map2 = cpu_to_le32(tmp_ch_bitmap[1]);
+
+ wl1271_debug(DEBUG_CMD,
+ "cmd reg domain bitmap1: 0x%08x, bitmap2: 0x%08x",
+ cmd->ch_bit_map1, cmd->ch_bit_map2);
+
+ ret = wl1271_cmd_send(wl, CMD_DFS_CHANNEL_CONFIG, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send reg domain dfs config");
+ goto out;
+ }
+
+ ret = wl->ops->wait_for_event(wl,
+ WLCORE_EVENT_DFS_CONFIG_COMPLETE,
+ &timeout);
+ if (ret < 0 || timeout) {
+ wl1271_error("reg domain conf %serror",
+ timeout ? "completion " : "");
+ ret = timeout ? -ETIMEDOUT : ret;
+ goto out;
+ }
+
+ memcpy(wl->reg_ch_conf_last, tmp_ch_bitmap, sizeof(tmp_ch_bitmap));
+ memset(wl->reg_ch_conf_pending, 0, sizeof(wl->reg_ch_conf_pending));
+
+out:
+ kfree(cmd);
+ return ret;
+}
+
int wl12xx_cmd_config_fwlog(struct wl1271 *wl)
{
struct wl12xx_cmd_config_fwlog *cmd;
@@ -1593,12 +1783,12 @@ out:
}
static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- u8 role_id)
+ u8 role_id, enum ieee80211_band band, u8 channel)
{
struct wl12xx_cmd_roc *cmd;
int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
+ wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", channel, role_id);
if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
return -EINVAL;
@@ -1610,8 +1800,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
cmd->role_id = role_id;
- cmd->channel = wlvif->channel;
- switch (wlvif->band) {
+ cmd->channel = channel;
+ switch (band) {
case IEEE80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
@@ -1666,30 +1856,18 @@ out:
return ret;
}
-int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
+ enum ieee80211_band band, u8 channel)
{
int ret = 0;
- bool is_first_roc;
if (WARN_ON(test_bit(role_id, wl->roc_map)))
return 0;
- is_first_roc = (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >=
- WL12XX_MAX_ROLES);
-
- ret = wl12xx_cmd_roc(wl, wlvif, role_id);
+ ret = wl12xx_cmd_roc(wl, wlvif, role_id, band, channel);
if (ret < 0)
goto out;
- if (is_first_roc) {
- ret = wl1271_cmd_wait_for_event(wl,
- REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
- if (ret < 0) {
- wl1271_error("cmd roc event completion error");
- goto out;
- }
- }
-
__set_bit(role_id, wl->roc_map);
out:
return ret;
@@ -1719,43 +1897,7 @@ out:
return ret;
}
-int wl12xx_cmd_channel_switch(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct wl12xx_cmd_channel_switch *cmd;
- int ret;
-
- wl1271_debug(DEBUG_ACX, "cmd channel switch");
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd) {
- ret = -ENOMEM;
- goto out;
- }
-
- cmd->role_id = wlvif->role_id;
- cmd->channel = ch_switch->channel->hw_value;
- cmd->switch_time = ch_switch->count;
- cmd->stop_tx = ch_switch->block_tx;
-
- /* FIXME: control from mac80211 in the future */
- cmd->post_switch_tx_disable = 0; /* Enable TX on the target channel */
-
- ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
- if (ret < 0) {
- wl1271_error("failed to send channel switch command");
- goto out_free;
- }
-
-out_free:
- kfree(cmd);
-
-out:
- return ret;
-}
-
-int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl)
+int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_stop_channel_switch *cmd;
int ret;
@@ -1768,6 +1910,8 @@ int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl)
goto out;
}
+ cmd->role_id = wlvif->role_id;
+
ret = wl1271_cmd_send(wl, CMD_STOP_CHANNEL_SWICTH, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to stop channel switch command");
@@ -1782,7 +1926,8 @@ out:
}
/* start dev role and roc on its channel */
-int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum ieee80211_band band, int channel)
{
int ret;
@@ -1797,11 +1942,11 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if (ret < 0)
goto out;
- ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+ ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel);
if (ret < 0)
goto out_disable;
- ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
+ ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id, band, channel);
if (ret < 0)
goto out_stop;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 2409f3d71f6..fd34123047c 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -31,6 +31,8 @@ struct acx_header;
int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
size_t res_len);
+int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len,
+ size_t res_len, unsigned long valid_rets);
int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
u8 *role_id);
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
@@ -39,11 +41,14 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum ieee80211_band band, int channel);
int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
+int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
+ size_t len, unsigned long valid_rets);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 ps_mode, u16 auto_ps_timeout);
@@ -75,22 +80,30 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
u16 tx_seq_16);
-int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
-int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
+int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid);
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
+ enum ieee80211_band band, u8 channel);
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta, u8 hlid);
int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
+void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
+ enum ieee80211_band band);
+int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
int wl12xx_cmd_channel_switch(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_channel_switch *ch_switch);
-int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
+int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 *hlid);
void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
+int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
+ u32 mask, bool *timeout);
enum wl1271_commands {
CMD_INTERROGATE = 1, /* use this to read information elements */
@@ -149,8 +162,11 @@ enum wl1271_commands {
CMD_WFD_START_DISCOVERY = 45,
CMD_WFD_STOP_DISCOVERY = 46,
CMD_WFD_ATTRIBUTE_CONFIG = 47,
- CMD_NOP = 48,
- CMD_LAST_COMMAND,
+ CMD_GENERIC_CFG = 48,
+ CMD_NOP = 49,
+
+ /* start of 18xx specific commands */
+ CMD_DFS_CHANNEL_CONFIG = 60,
MAX_COMMAND_ID = 0xFFFF,
};
@@ -167,8 +183,8 @@ enum cmd_templ {
CMD_TEMPL_PS_POLL,
CMD_TEMPL_KLV,
CMD_TEMPL_DISCONNECT,
- CMD_TEMPL_APP_PROBE_REQ_2_4,
- CMD_TEMPL_APP_PROBE_REQ_5,
+ CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY,
+ CMD_TEMPL_APP_PROBE_REQ_5_LEGACY,
CMD_TEMPL_BAR, /* for firmware internal use only */
CMD_TEMPL_CTS, /*
* For CTS-to-self (FastCTS) mechanism
@@ -179,6 +195,8 @@ enum cmd_templ {
CMD_TEMPL_DEAUTH_AP,
CMD_TEMPL_TEMPORARY,
CMD_TEMPL_LINK_MEASUREMENT_REPORT,
+ CMD_TEMPL_PROBE_REQ_2_4_PERIODIC,
+ CMD_TEMPL_PROBE_REQ_5_PERIODIC,
CMD_TEMPL_MAX = 0xff
};
@@ -220,7 +238,8 @@ enum {
CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/
CMD_STATUS_TEMPLATE_OOM = 23,
CMD_STATUS_NO_RX_BA_SESSION = 24,
- MAX_COMMAND_STATUS = 0xff
+
+ MAX_COMMAND_STATUS
};
#define CMDMBOX_HEADER_LEN 4
@@ -345,7 +364,15 @@ struct wl12xx_cmd_role_start {
u8 reset_tsf;
- u8 padding_1[4];
+ /*
+ * ap supports wmm (note that there is additional
+ * per-sta wmm configuration)
+ */
+ u8 wmm;
+
+ u8 bcast_session_id;
+ u8 global_session_id;
+ u8 padding_1[1];
} __packed ap;
};
} __packed;
@@ -515,7 +542,14 @@ struct wl12xx_cmd_set_peer_state {
u8 hlid;
u8 state;
- u8 padding[2];
+
+ /*
+ * wmm is relevant for sta role only.
+ * ap role configures the per-sta wmm params in
+ * the add_peer command.
+ */
+ u8 wmm;
+ u8 padding[1];
} __packed;
struct wl12xx_cmd_roc {
@@ -558,7 +592,7 @@ struct wl12xx_cmd_add_peer {
u8 bss_index;
u8 sp_len;
u8 wmm;
- u8 padding1;
+ u8 session_id;
} __packed;
struct wl12xx_cmd_remove_peer {
@@ -597,6 +631,13 @@ enum wl12xx_fwlogger_output {
WL12XX_FWLOG_OUTPUT_HOST,
};
+struct wl12xx_cmd_regdomain_dfs_config {
+ struct wl1271_cmd_header header;
+
+ __le32 ch_bit_map1;
+ __le32 ch_bit_map2;
+} __packed;
+
struct wl12xx_cmd_config_fwlog {
struct wl1271_cmd_header header;
@@ -626,27 +667,13 @@ struct wl12xx_cmd_stop_fwlog {
struct wl1271_cmd_header header;
} __packed;
-struct wl12xx_cmd_channel_switch {
+struct wl12xx_cmd_stop_channel_switch {
struct wl1271_cmd_header header;
u8 role_id;
-
- /* The new serving channel */
- u8 channel;
- /* Relative time of the serving channel switch in TBTT units */
- u8 switch_time;
- /* Stop the role TX, should expect it after radar detection */
- u8 stop_tx;
- /* The target channel tx status 1-stopped 0-open*/
- u8 post_switch_tx_disable;
-
u8 padding[3];
} __packed;
-struct wl12xx_cmd_stop_channel_switch {
- struct wl1271_cmd_header header;
-} __packed;
-
/* Used to check radio status after calibration */
#define MAX_TLV_LENGTH 500
#define TEST_CMD_P2G_CAL 2 /* TX BiP */
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 9e40760bafe..2b96ff82134 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -57,20 +57,49 @@ enum {
};
enum {
- CONF_HW_RATE_INDEX_1MBPS = 0,
- CONF_HW_RATE_INDEX_2MBPS = 1,
- CONF_HW_RATE_INDEX_5_5MBPS = 2,
- CONF_HW_RATE_INDEX_6MBPS = 3,
- CONF_HW_RATE_INDEX_9MBPS = 4,
- CONF_HW_RATE_INDEX_11MBPS = 5,
- CONF_HW_RATE_INDEX_12MBPS = 6,
- CONF_HW_RATE_INDEX_18MBPS = 7,
- CONF_HW_RATE_INDEX_22MBPS = 8,
- CONF_HW_RATE_INDEX_24MBPS = 9,
- CONF_HW_RATE_INDEX_36MBPS = 10,
- CONF_HW_RATE_INDEX_48MBPS = 11,
- CONF_HW_RATE_INDEX_54MBPS = 12,
- CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
+ CONF_HW_RATE_INDEX_1MBPS = 0,
+ CONF_HW_RATE_INDEX_2MBPS = 1,
+ CONF_HW_RATE_INDEX_5_5MBPS = 2,
+ CONF_HW_RATE_INDEX_11MBPS = 3,
+ CONF_HW_RATE_INDEX_6MBPS = 4,
+ CONF_HW_RATE_INDEX_9MBPS = 5,
+ CONF_HW_RATE_INDEX_12MBPS = 6,
+ CONF_HW_RATE_INDEX_18MBPS = 7,
+ CONF_HW_RATE_INDEX_24MBPS = 8,
+ CONF_HW_RATE_INDEX_36MBPS = 9,
+ CONF_HW_RATE_INDEX_48MBPS = 10,
+ CONF_HW_RATE_INDEX_54MBPS = 11,
+ CONF_HW_RATE_INDEX_MCS0 = 12,
+ CONF_HW_RATE_INDEX_MCS1 = 13,
+ CONF_HW_RATE_INDEX_MCS2 = 14,
+ CONF_HW_RATE_INDEX_MCS3 = 15,
+ CONF_HW_RATE_INDEX_MCS4 = 16,
+ CONF_HW_RATE_INDEX_MCS5 = 17,
+ CONF_HW_RATE_INDEX_MCS6 = 18,
+ CONF_HW_RATE_INDEX_MCS7 = 19,
+ CONF_HW_RATE_INDEX_MCS7_SGI = 20,
+ CONF_HW_RATE_INDEX_MCS0_40MHZ = 21,
+ CONF_HW_RATE_INDEX_MCS1_40MHZ = 22,
+ CONF_HW_RATE_INDEX_MCS2_40MHZ = 23,
+ CONF_HW_RATE_INDEX_MCS3_40MHZ = 24,
+ CONF_HW_RATE_INDEX_MCS4_40MHZ = 25,
+ CONF_HW_RATE_INDEX_MCS5_40MHZ = 26,
+ CONF_HW_RATE_INDEX_MCS6_40MHZ = 27,
+ CONF_HW_RATE_INDEX_MCS7_40MHZ = 28,
+ CONF_HW_RATE_INDEX_MCS7_40MHZ_SGI = 29,
+
+	/* MCS8+ rates overlap with 40MHz rates */
+ CONF_HW_RATE_INDEX_MCS8 = 21,
+ CONF_HW_RATE_INDEX_MCS9 = 22,
+ CONF_HW_RATE_INDEX_MCS10 = 23,
+ CONF_HW_RATE_INDEX_MCS11 = 24,
+ CONF_HW_RATE_INDEX_MCS12 = 25,
+ CONF_HW_RATE_INDEX_MCS13 = 26,
+ CONF_HW_RATE_INDEX_MCS14 = 27,
+ CONF_HW_RATE_INDEX_MCS15 = 28,
+ CONF_HW_RATE_INDEX_MCS15_SGI = 29,
+
+ CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_MCS7_40MHZ_SGI,
};
#define CONF_HW_RXTX_RATE_UNSUPPORTED 0xff
@@ -415,11 +444,11 @@ struct conf_rx_settings {
#define CONF_TX_RATE_MASK_BASIC_P2P CONF_HW_BIT_RATE_6MBPS
/*
- * Rates supported for data packets when operating as AP. Note the absence
+ * Rates supported for data packets when operating as STA/AP. Note the absence
* of the 22Mbps rate. There is a FW limitation on 12 rates so we must drop
* one. The rate dropped is not mandatory under any operating mode.
*/
-#define CONF_TX_AP_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \
+#define CONF_TX_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \
CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \
CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \
@@ -677,6 +706,18 @@ struct conf_tx_settings {
/* Time in ms for Tx watchdog timer to expire */
u32 tx_watchdog_timeout;
+
+ /*
+ * when a slow link has this many packets pending, it becomes a low
+ * priority link, scheduling-wise
+ */
+ u8 slow_link_thold;
+
+ /*
+ * when a fast link has this many packets pending, it becomes a low
+ * priority link, scheduling-wise
+ */
+ u8 fast_link_thold;
} __packed;
enum {
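The two new thresholds feed the per-link scheduling decision that the lnk_high_prio()/lnk_low_prio() hw_ops introduced further down (hw_ops.h) ultimately make: once a link holds more than the configured number of packets in the firmware, it is treated as low priority. A standalone sketch under that assumption (the field names follow the struct above; the decision helper is hypothetical and chip-specific in practice):

#include <stdbool.h>
#include <stdio.h>

struct conf_tx_thresholds {
	unsigned char slow_link_thold;	/* from conf_tx_settings above */
	unsigned char fast_link_thold;
};

struct link_state {
	unsigned int allocated_pkts;	/* packets currently held by the fw */
	bool fast;			/* fw marked this link as "fast" */
};

/* hypothetical: demote a link once it exceeds its per-class threshold */
static bool link_is_low_prio(const struct conf_tx_thresholds *c,
			     const struct link_state *lnk)
{
	unsigned int thold = lnk->fast ? c->fast_link_thold
				       : c->slow_link_thold;
	return lnk->allocated_pkts > thold;
}

int main(void)
{
	struct conf_tx_thresholds c = { .slow_link_thold = 3,
					.fast_link_thold = 30 };
	struct link_state slow = { .allocated_pkts = 5, .fast = false };

	printf("slow link demoted to low prio: %d\n",
	       link_is_low_prio(&c, &slow));
	return 0;
}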
@@ -1047,6 +1088,7 @@ struct conf_roam_trigger_settings {
struct conf_scan_settings {
/*
* The minimum time to wait on each channel for active scans
+ * This value will be used whenever there's a connected interface.
*
* Range: u32 tu/1000
*/
@@ -1054,24 +1096,37 @@ struct conf_scan_settings {
/*
* The maximum time to wait on each channel for active scans
+ * This value is currently used whenever there's a
+ * connected interface. It shouldn't exceed 30000 (~30ms) to avoid
+ * interfering with VoIP traffic while scanning.
*
* Range: u32 tu/1000
*/
u32 max_dwell_time_active;
- /*
- * The minimum time to wait on each channel for passive scans
+ /* The minimum time to wait on each channel for active scans
+ * when it's possible to have longer scan dwell times.
+ * Currently this is used whenever we're idle on all interfaces.
+ * Longer dwell times improve detection of networks within a
+ * single scan.
*
* Range: u32 tu/1000
*/
- u32 min_dwell_time_passive;
+ u32 min_dwell_time_active_long;
- /*
- * The maximum time to wait on each channel for passive scans
+ /* The maximum time to wait on each channel for active scans
+ * when it's possible to have longer scan dwell times.
+ * See min_dwell_time_active_long
*
* Range: u32 tu/1000
*/
- u32 max_dwell_time_passive;
+ u32 max_dwell_time_active_long;
+
+ /* time to wait on the channel for passive scans (in TU/1000) */
+ u32 dwell_time_passive;
+
+ /* time to wait on the channel for DFS scans (in TU/1000) */
+ u32 dwell_time_dfs;
/*
* Number of probe requests to transmit on each active scan channel
@@ -1276,12 +1331,20 @@ struct conf_hangover_settings {
u8 window_size;
} __packed;
+struct conf_recovery_settings {
+ /* BUG() on fw recovery */
+ u8 bug_on_recovery;
+
+ /* Prevent HW recovery. FW will remain stuck. */
+ u8 no_recovery;
+} __packed;
+
/*
* The conf version consists of 4 bytes. The two MSB are the wlcore
* version, the two LSB are the lower driver's private conf
* version.
*/
-#define WLCORE_CONF_VERSION (0x0002 << 16)
+#define WLCORE_CONF_VERSION (0x0005 << 16)
#define WLCORE_CONF_MASK 0xffff0000
#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \
sizeof(struct wlcore_conf))
@@ -1309,6 +1372,7 @@ struct wlcore_conf {
struct conf_fwlog fwlog;
struct conf_rate_policy_settings rate;
struct conf_hangover_settings hangover;
+ struct conf_recovery_settings recovery;
} __packed;
struct wlcore_conf_file {
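conf.h also gains a recovery section and bumps the wlcore half of the version word from 0x0002 to 0x0005; given WLCORE_CONF_MASK, the driver presumably compares only the upper 16 bits when accepting a conf blob, so previously built binary conf files are rejected after this change. A small sketch of such a check (only the two macros come from the hunk above; the validation function is an assumption):

#include <stdint.h>
#include <stdio.h>

#define WLCORE_CONF_VERSION	(0x0005 << 16)	/* from the hunk above */
#define WLCORE_CONF_MASK	0xffff0000

/* hypothetical validation of a loaded conf blob's version word */
static int conf_version_ok(uint32_t file_version)
{
	return (file_version & WLCORE_CONF_MASK) ==
	       (WLCORE_CONF_VERSION & WLCORE_CONF_MASK);
}

int main(void)
{
	printf("old 0x0002 blob accepted: %d\n",
	       conf_version_ok(0x0002 << 16));	/* 0: stale blob rejected */
	printf("new 0x0005 blob accepted: %d\n",
	       conf_version_ok(0x0005 << 16));	/* 1 */
	return 0;
}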
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index c86bb00c248..e70a7c86486 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -490,7 +490,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_HEX(chip.id);
DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str);
- DRIVER_STATE_PRINT_INT(sched_scanning);
+ DRIVER_STATE_PRINT_INT(recovery_count);
#undef DRIVER_STATE_PRINT_INT
#undef DRIVER_STATE_PRINT_LONG
@@ -560,7 +560,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
VIF_STATE_PRINT_INT(sta.hlid);
- VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
VIF_STATE_PRINT_INT(sta.basic_rate_idx);
VIF_STATE_PRINT_INT(sta.ap_rate_idx);
VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
@@ -577,6 +576,10 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
}
VIF_STATE_PRINT_INT(last_tx_hlid);
+ VIF_STATE_PRINT_INT(tx_queue_count[0]);
+ VIF_STATE_PRINT_INT(tx_queue_count[1]);
+ VIF_STATE_PRINT_INT(tx_queue_count[2]);
+ VIF_STATE_PRINT_INT(tx_queue_count[3]);
VIF_STATE_PRINT_LHEX(links_map[0]);
VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
VIF_STATE_PRINT_INT(band);
@@ -589,7 +592,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
VIF_STATE_PRINT_INT(beacon_int);
VIF_STATE_PRINT_INT(default_key);
VIF_STATE_PRINT_INT(aid);
- VIF_STATE_PRINT_INT(session_counter);
VIF_STATE_PRINT_INT(psm_entry_retry);
VIF_STATE_PRINT_INT(power_level);
VIF_STATE_PRINT_INT(rssi_thold);
@@ -993,7 +995,7 @@ static ssize_t sleep_auth_write(struct file *file,
return -EINVAL;
}
- if (value < 0 || value > WL1271_PSM_MAX) {
+ if (value > WL1271_PSM_MAX) {
wl1271_warning("sleep_auth must be between 0 and %d",
WL1271_PSM_MAX);
return -ERANGE;
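The debugfs change above drops the "value < 0" half of the range check; that is only correct because value is an unsigned type there (presumably an unsigned long parsed kstrtoul-style), where the comparison can never be true and merely trips compiler warnings such as -Wtype-limits. A tiny standalone illustration (WL1271_PSM_MAX below is a placeholder value):

#include <stdio.h>

#define WL1271_PSM_MAX 2	/* placeholder value for illustration only */

int main(void)
{
	unsigned long value = 3;

	/* "value < 0" on an unsigned type can never be true; compilers
	 * (e.g. gcc -Wtype-limits) flag it, which is why only the upper
	 * bound is kept in the hunk above. */
	if (value > WL1271_PSM_MAX)
		printf("rejected: %lu out of range 0..%d\n",
		       value, WL1271_PSM_MAX);
	return 0;
}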
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 48907054d49..70f289aa1bc 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -29,34 +29,39 @@
#include "scan.h"
#include "wl12xx_80211.h"
-static void wl1271_event_rssi_trigger(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct event_mailbox *mbox)
+void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
{
- struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
enum nl80211_cqm_rssi_threshold_event event;
- s8 metric = mbox->rssi_snr_trigger_metric[0];
+ s8 metric = metric_arr[0];
wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
- if (metric <= wlvif->rssi_thold)
- event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
- else
- event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
-
- if (event != wlvif->last_rssi_event)
- ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
- wlvif->last_rssi_event = event;
+ /* TODO: check actual multi-role support */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (metric <= wlvif->rssi_thold)
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ else
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ if (event != wlvif->last_rssi_event)
+ ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+ wlvif->last_rssi_event = event;
+ }
}
+EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);
static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
- if (!wlvif->sta.ba_rx_bitmap)
+ u8 hlid = wlvif->sta.hlid;
+ if (!wl->links[hlid].ba_bitmap)
return;
- ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
+ ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
vif->bss_conf.bssid);
} else {
u8 hlid;
@@ -74,8 +79,7 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
}
}
-static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
- u8 enable)
+void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
{
struct wl12xx_vif *wlvif;
@@ -87,201 +91,169 @@ static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
wl1271_recalc_rx_streaming(wl, wlvif);
}
}
-
}
+EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);
-static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
+void wlcore_event_sched_scan_completed(struct wl1271 *wl,
+ u8 status)
{
- wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
- wl1271_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector);
- wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask);
+ wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)",
+ status);
+
+ if (wl->sched_vif) {
+ ieee80211_sched_scan_stopped(wl->hw);
+ wl->sched_vif = NULL;
+ }
}
+EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);
-static int wl1271_event_process(struct wl1271 *wl)
+void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
+ unsigned long roles_bitmap,
+ unsigned long allowed_bitmap)
{
- struct event_mailbox *mbox = wl->mbox;
- struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
- u32 vector;
- bool disconnect_sta = false;
- unsigned long sta_bitmap = 0;
- int ret;
-
- wl1271_event_mbox_dump(mbox);
-
- vector = le32_to_cpu(mbox->events_vector);
- vector &= ~(le32_to_cpu(mbox->events_mask));
- wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector);
- if (vector & SCAN_COMPLETE_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "status: 0x%x",
- mbox->scheduled_scan_status);
-
- wl1271_scan_stm(wl, wl->scan_vif);
- }
+ wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
+ __func__, roles_bitmap, allowed_bitmap);
- if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_REPORT_EVENT "
- "(status 0x%0x)", mbox->scheduled_scan_status);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
+ !test_bit(wlvif->role_id, &roles_bitmap))
+ continue;
- wl1271_scan_sched_scan_results(wl);
+ wlvif->ba_allowed = !!test_bit(wlvif->role_id,
+ &allowed_bitmap);
+ if (!wlvif->ba_allowed)
+ wl1271_stop_ba_event(wl, wlvif);
}
+}
+EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);
- if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT "
- "(status 0x%0x)", mbox->scheduled_scan_status);
- if (wl->sched_scanning) {
- ieee80211_sched_scan_stopped(wl->hw);
- wl->sched_scanning = false;
- }
- }
+void wlcore_event_channel_switch(struct wl1271 *wl,
+ unsigned long roles_bitmap,
+ bool success)
+{
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
- if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
- wl12xx_event_soft_gemini_sense(wl,
- mbox->soft_gemini_sense_info);
+ wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
+ __func__, roles_bitmap, success);
- /*
- * We are HW_MONITOR device. On beacon loss - queue
- * connection loss work. Cancel it on REGAINED event.
- */
- if (vector & BSS_LOSE_EVENT_ID) {
- /* TODO: check for multi-role */
- int delay = wl->conf.conn.synch_fail_thold *
- wl->conf.conn.bss_lose_timeout;
- wl1271_info("Beacon loss detected.");
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
+ !test_bit(wlvif->role_id, &roles_bitmap))
+ continue;
- /*
- * if the work is already queued, it should take place. We
- * don't want to delay the connection loss indication
- * any more.
- */
- ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work,
- msecs_to_jiffies(delay));
+ if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
+ &wlvif->flags))
+ continue;
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- vif = wl12xx_wlvif_to_vif(wlvif);
+ vif = wl12xx_wlvif_to_vif(wlvif);
- ieee80211_cqm_rssi_notify(
- vif,
- NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
- GFP_KERNEL);
- }
+ ieee80211_chswitch_done(vif, success);
+ cancel_delayed_work(&wlvif->channel_switch_work);
}
+}
+EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
- if (vector & REGAINED_BSS_EVENT_ID) {
- /* TODO: check for multi-role */
- wl1271_info("Beacon regained.");
- cancel_delayed_work(&wl->connection_loss_work);
-
- /* sanity check - we can't lose and gain the beacon together */
- WARN(vector & BSS_LOSE_EVENT_ID,
- "Concurrent beacon loss and gain from FW");
- }
+void wlcore_event_dummy_packet(struct wl1271 *wl)
+{
+ wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
+ wl1271_tx_dummy_packet(wl);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);
- if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
- /* TODO: check actual multi-role support */
- wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- wl1271_event_rssi_trigger(wl, wlvif, mbox);
+static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
+{
+ u32 num_packets = wl->conf.tx.max_tx_retries;
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ const u8 *addr;
+ int h;
+
+ for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+ bool found = false;
+ /* find the ap vif connected to this sta */
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ if (!test_bit(h, wlvif->ap.sta_hlid_map))
+ continue;
+ found = true;
+ break;
}
- }
+ if (!found)
+ continue;
- if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
- u8 role_id = mbox->role_id;
- wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
- "ba_allowed = 0x%x, role_id=%d",
- mbox->rx_ba_allowed, role_id);
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ addr = wl->links[h].addr;
- wl12xx_for_each_wlvif(wl, wlvif) {
- if (role_id != 0xff && role_id != wlvif->role_id)
- continue;
-
- wlvif->ba_allowed = !!mbox->rx_ba_allowed;
- if (!wlvif->ba_allowed)
- wl1271_stop_ba_event(wl, wlvif);
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, addr);
+ if (sta) {
+ wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
+ ieee80211_report_low_ack(sta, num_packets);
}
+ rcu_read_unlock();
}
+}
- if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
- "status = 0x%x",
- mbox->channel_switch_status);
- /*
- * That event uses for two cases:
- * 1) channel switch complete with status=0
- * 2) channel switch failed status=1
- */
-
- /* TODO: configure only the relevant vif */
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- bool success;
-
- if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
- &wlvif->flags))
- continue;
-
- success = mbox->channel_switch_status ? false : true;
- vif = wl12xx_wlvif_to_vif(wlvif);
+void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
+{
+ wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
+ wlcore_disconnect_sta(wl, sta_bitmap);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);
- ieee80211_chswitch_done(vif, success);
- }
- }
+void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
+{
+ wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
+ wlcore_disconnect_sta(wl, sta_bitmap);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);
- if ((vector & DUMMY_PACKET_EVENT_ID)) {
- wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
- ret = wl1271_tx_dummy_packet(wl);
- if (ret < 0)
- return ret;
- }
+void wlcore_event_roc_complete(struct wl1271 *wl)
+{
+ wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
+ if (wl->roc_vif)
+ ieee80211_ready_on_channel(wl->hw);
+}
+EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);
+void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
+{
/*
- * "TX retries exceeded" has a different meaning according to mode.
- * In AP mode the offending station is disconnected.
+ * We are a HW_MONITOR device. On beacon loss, queue
+ * connection loss work. Cancel it on REGAINED event.
*/
- if (vector & MAX_TX_RETRY_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
- sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
- disconnect_sta = true;
- }
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
+ int delay = wl->conf.conn.synch_fail_thold *
+ wl->conf.conn.bss_lose_timeout;
- if (vector & INACTIVE_STA_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
- sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
- disconnect_sta = true;
- }
+ wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);
- if (disconnect_sta) {
- u32 num_packets = wl->conf.tx.max_tx_retries;
- struct ieee80211_sta *sta;
- const u8 *addr;
- int h;
-
- for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
- bool found = false;
- /* find the ap vif connected to this sta */
- wl12xx_for_each_wlvif_ap(wl, wlvif) {
- if (!test_bit(h, wlvif->ap.sta_hlid_map))
- continue;
- found = true;
- break;
- }
- if (!found)
- continue;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
+ !test_bit(wlvif->role_id, &roles_bitmap))
+ continue;
- vif = wl12xx_wlvif_to_vif(wlvif);
- addr = wl->links[h].addr;
+ /*
+ * if the work is already queued, it should take place.
+ * We don't want to delay the connection loss
+ * indication any more.
+ */
+ ieee80211_queue_delayed_work(wl->hw,
+ &wlvif->connection_loss_work,
+ msecs_to_jiffies(delay));
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, addr);
- if (sta) {
- wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
- ieee80211_report_low_ack(sta, num_packets);
- }
- rcu_read_unlock();
- }
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
+ GFP_KERNEL);
}
- return 0;
}
+EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);
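Beacon-loss events now carry a bitmap of affected roles, and every matching STA vif arms its own connection_loss_work instead of a single global one; the grace period is synch_fail_thold * bss_lose_timeout milliseconds, after which the per-vif work (added further down in main.c) calls ieee80211_connection_loss(). The delay arithmetic in isolation (the conf values below are illustrative only, not wlcore defaults):

#include <stdio.h>

int main(void)
{
	/* illustrative conf values only; the real ones come from
	 * wl->conf.conn and differ per chip/platform */
	unsigned int synch_fail_thold = 10;	/* missed-beacon threshold */
	unsigned int bss_lose_timeout_ms = 400;	/* per-beacon timeout */

	unsigned int delay_ms = synch_fail_thold * bss_lose_timeout_ms;

	printf("connection loss reported after %u ms without beacons\n",
	       delay_ms);
	return 0;
}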
int wl1271_event_unmask(struct wl1271 *wl)
{
@@ -305,12 +277,12 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
/* first we read the mbox descriptor */
ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
- sizeof(*wl->mbox), false);
+ wl->mbox_size, false);
if (ret < 0)
return ret;
/* process the descriptor */
- ret = wl1271_event_process(wl);
+ ret = wl->ops->process_mailbox_events(wl);
if (ret < 0)
return ret;
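The tail of event.c shows the new split: wlcore reads an opaque, chip-sized mailbox (wl->mbox_size) and hands it to the lower driver through ops->process_mailbox_events(), which in turn calls the exported wlcore_event_*() helpers above for each event it decodes. A minimal userspace sketch of that dispatch shape (the struct members and the op name follow the hunk; everything else is illustrative):

#include <stdio.h>

struct wl_ops;

struct wl {
	const struct wl_ops *ops;
	void *mbox;		/* chip-specific mailbox buffer */
	unsigned int mbox_size;	/* set by the lower driver */
};

struct wl_ops {
	/* the lower driver parses its own mailbox layout and calls the
	 * generic wlcore_event_*() helpers for each event it finds */
	int (*process_mailbox_events)(struct wl *wl);
};

static int fake_chip_process_events(struct wl *wl)
{
	printf("parsing %u-byte mailbox\n", wl->mbox_size);
	return 0;
}

static const struct wl_ops fake_chip_ops = {
	.process_mailbox_events = fake_chip_process_events,
};

int main(void)
{
	struct wl wl = { .ops = &fake_chip_ops, .mbox_size = 68 };

	/* mirrors the call replacing wl1271_event_process() above */
	return wl.ops->process_mailbox_events(&wl);
}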
diff --git a/drivers/net/wireless/ti/wlcore/event.h b/drivers/net/wireless/ti/wlcore/event.h
index 8adf18d6c58..acc7a59d382 100644
--- a/drivers/net/wireless/ti/wlcore/event.h
+++ b/drivers/net/wireless/ti/wlcore/event.h
@@ -46,33 +46,17 @@ enum {
RSSI_SNR_TRIGGER_5_EVENT_ID = BIT(5),
RSSI_SNR_TRIGGER_6_EVENT_ID = BIT(6),
RSSI_SNR_TRIGGER_7_EVENT_ID = BIT(7),
- MEASUREMENT_START_EVENT_ID = BIT(8),
- MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
- SCAN_COMPLETE_EVENT_ID = BIT(10),
- WFD_DISCOVERY_COMPLETE_EVENT_ID = BIT(11),
- AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12),
- RESERVED1 = BIT(13),
- PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14),
- ROLE_STOP_COMPLETE_EVENT_ID = BIT(15),
- RADAR_DETECTED_EVENT_ID = BIT(16),
- CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
- BSS_LOSE_EVENT_ID = BIT(18),
- REGAINED_BSS_EVENT_ID = BIT(19),
- MAX_TX_RETRY_EVENT_ID = BIT(20),
- DUMMY_PACKET_EVENT_ID = BIT(21),
- SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
- CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID = BIT(23),
- SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
- PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25),
- INACTIVE_STA_EVENT_ID = BIT(26),
- PEER_REMOVE_COMPLETE_EVENT_ID = BIT(27),
- PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28),
- PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29),
- BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30),
- REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(31),
+
EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff,
};
+/* events the driver might want to wait for */
+enum wlcore_wait_event {
+ WLCORE_EVENT_ROLE_STOP_COMPLETE,
+ WLCORE_EVENT_PEER_REMOVE_COMPLETE,
+ WLCORE_EVENT_DFS_CONFIG_COMPLETE
+};
+
enum {
EVENT_ENTER_POWER_SAVE_FAIL = 0,
EVENT_ENTER_POWER_SAVE_SUCCESS,
@@ -80,61 +64,24 @@ enum {
#define NUM_OF_RSSI_SNR_TRIGGERS 8
-struct event_mailbox {
- __le32 events_vector;
- __le32 events_mask;
- __le32 reserved_1;
- __le32 reserved_2;
-
- u8 number_of_scan_results;
- u8 scan_tag;
- u8 completed_scan_status;
- u8 reserved_3;
-
- u8 soft_gemini_sense_info;
- u8 soft_gemini_protective_info;
- s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
- u8 change_auto_mode_timeout;
- u8 scheduled_scan_status;
- u8 reserved4;
- /* tuned channel (roc) */
- u8 roc_channel;
-
- __le16 hlid_removed_bitmap;
-
- /* bitmap of aged stations (by HLID) */
- __le16 sta_aging_status;
-
- /* bitmap of stations (by HLID) which exceeded max tx retries */
- __le16 sta_tx_retry_exceeded;
-
- /* discovery completed results */
- u8 discovery_tag;
- u8 number_of_preq_results;
- u8 number_of_prsp_results;
- u8 reserved_5;
-
- /* rx ba constraint */
- u8 role_id; /* 0xFF means any role. */
- u8 rx_ba_allowed;
- u8 reserved_6[2];
-
- /* Channel switch results */
-
- u8 channel_switch_role_id;
- u8 channel_switch_status;
- u8 reserved_7[2];
-
- u8 ps_poll_delivery_failure_role_ids;
- u8 stopped_role_ids;
- u8 started_role_ids;
-
- u8 reserved_8[9];
-} __packed;
-
struct wl1271;
int wl1271_event_unmask(struct wl1271 *wl);
int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
+void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable);
+void wlcore_event_sched_scan_completed(struct wl1271 *wl,
+ u8 status);
+void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
+ unsigned long roles_bitmap,
+ unsigned long allowed_bitmap);
+void wlcore_event_channel_switch(struct wl1271 *wl,
+ unsigned long roles_bitmap,
+ bool success);
+void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap);
+void wlcore_event_dummy_packet(struct wl1271 *wl);
+void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap);
+void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap);
+void wlcore_event_roc_complete(struct wl1271 *wl);
+void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr);
#endif
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 2673d783ec1..7fd260c02a0 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -201,4 +201,45 @@ wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len)
return buf_offset;
}
+static inline void
+wlcore_hw_sta_rc_update(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ if (wl->ops->sta_rc_update)
+ wl->ops->sta_rc_update(wl, wlvif, sta, changed);
+}
+
+static inline int
+wlcore_hw_set_peer_cap(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid)
+{
+ if (wl->ops->set_peer_cap)
+ return wl->ops->set_peer_cap(wl, ht_cap, allow_ht_operation,
+ rate_set, hlid);
+
+ return 0;
+}
+
+static inline bool
+wlcore_hw_lnk_high_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ if (!wl->ops->lnk_high_prio)
+ BUG_ON(1);
+
+ return wl->ops->lnk_high_prio(wl, hlid, lnk);
+}
+
+static inline bool
+wlcore_hw_lnk_low_prio(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk)
+{
+ if (!wl->ops->lnk_low_prio)
+ BUG_ON(1);
+
+ return wl->ops->lnk_low_prio(wl, hlid, lnk);
+}
+
#endif
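Among the new hw_ops wrappers, sta_rc_update() and set_peer_cap() are optional (a missing op is a no-op or returns 0), while lnk_high_prio()/lnk_low_prio() are mandatory enough to BUG() if absent, presumably because the TX scheduler consults them on every dequeue. A hypothetical two-pass selection loop showing how such callbacks might be used (the names and policy below are illustrative, not the actual wlcore TX path):

#include <stdbool.h>
#include <stdio.h>

#define MAX_LINKS 4

/* stand-ins for wl1271_link and the hw ops; the real decision is made
 * by the chip-specific lnk_high_prio()/lnk_low_prio() callbacks above */
static bool lnk_high_prio(int hlid) { return hlid == 2; }
static bool lnk_low_prio(int hlid)  { return hlid == 0; }

/* hypothetical two-pass link selection: serve high-priority links first,
 * then anything that is not explicitly low priority */
static int pick_next_link(void)
{
	int hlid;

	for (hlid = 0; hlid < MAX_LINKS; hlid++)
		if (lnk_high_prio(hlid))
			return hlid;
	for (hlid = 0; hlid < MAX_LINKS; hlid++)
		if (!lnk_low_prio(hlid))
			return hlid;
	return -1;	/* nothing schedulable right now */
}

int main(void)
{
	printf("next link to serve: %d\n", pick_next_link());
	return 0;
}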
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 32d157f62f3..5c6f11e157d 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -41,14 +41,14 @@ int wl1271_init_templates_config(struct wl1271 *wl)
/* send empty templates for fw memory reservation */
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
- CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
+ wl->scan_templ_id_2_4, NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
- CMD_TEMPL_CFG_PROBE_REQ_5,
+ wl->scan_templ_id_5,
NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0,
WL1271_RATE_AUTOMATIC);
if (ret < 0)
@@ -56,14 +56,16 @@ int wl1271_init_templates_config(struct wl1271 *wl)
if (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL) {
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
- CMD_TEMPL_APP_PROBE_REQ_2_4, NULL,
+ wl->sched_scan_templ_id_2_4,
+ NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
- CMD_TEMPL_APP_PROBE_REQ_5, NULL,
+ wl->sched_scan_templ_id_5,
+ NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
@@ -463,7 +465,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
- supported_rates = CONF_TX_AP_ENABLED_RATES;
+ supported_rates = CONF_TX_ENABLED_RATES;
/* unconditionally enable HT rates */
supported_rates |= CONF_TX_MCS_RATES;
@@ -575,9 +577,6 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
/* Configure for power according to debugfs */
if (sta_auth != WL1271_PSM_ILLEGAL)
ret = wl1271_acx_sleep_auth(wl, sta_auth);
- /* Configure for power always on */
- else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
/* Configure for ELP power saving */
else
ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
@@ -679,6 +678,10 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
+ ret = wlcore_cmd_regdomain_config_locked(wl);
+ if (ret < 0)
+ return ret;
+
/* Bluetooth WLAN coexistence */
ret = wl1271_init_pta(wl);
if (ret < 0)
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index f48530fec14..af7d9f9b3b4 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -105,13 +105,13 @@ static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr,
{
int ret;
- ret = wlcore_raw_read(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
+ ret = wlcore_raw_read(wl, addr, wl->buffer_32,
+ sizeof(*wl->buffer_32), false);
if (ret < 0)
return ret;
if (val)
- *val = le32_to_cpu(wl->buffer_32);
+ *val = le32_to_cpu(*wl->buffer_32);
return 0;
}
@@ -119,9 +119,9 @@ static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr,
static inline int __must_check wlcore_raw_write32(struct wl1271 *wl, int addr,
u32 val)
{
- wl->buffer_32 = cpu_to_le32(val);
- return wlcore_raw_write(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
+ *wl->buffer_32 = cpu_to_le32(val);
+ return wlcore_raw_write(wl, addr, wl->buffer_32,
+ sizeof(*wl->buffer_32), false);
}
static inline int __must_check wlcore_read(struct wl1271 *wl, int addr,
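buffer_32 changes from a __le32 embedded in struct wl1271 to a pointer to a separately allocated __le32, presumably so the 4-byte bounce buffer handed to the SDIO/SPI layer is safe to DMA from rather than sharing a cache line with unrelated struct members; the helpers now dereference it and size it with sizeof(*wl->buffer_32). A rough userspace analogue of the new shape (the DMA rationale is an assumption):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* minimal stand-in: the driver now keeps a pointer to a separately
 * allocated 4-byte bounce buffer instead of embedding it in the struct */
struct wl_dev {
	uint32_t *buffer_32;	/* kmalloc'd in the real driver */
};

static int raw_write32(struct wl_dev *wl, uint32_t val)
{
	*wl->buffer_32 = val;	/* cpu_to_le32() in the kernel version */
	/* ...hand wl->buffer_32 / sizeof(*wl->buffer_32) to the bus layer */
	printf("writing 0x%x via a %zu-byte bounce buffer\n",
	       (unsigned)*wl->buffer_32, sizeof(*wl->buffer_32));
	return 0;
}

int main(void)
{
	struct wl_dev wl;

	wl.buffer_32 = malloc(sizeof(*wl.buffer_32));
	if (!wl.buffer_32)
		return 1;
	raw_write32(&wl, 0x1234);
	free(wl.buffer_32);
	return 0;
}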
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index ce6e62a37e1..2c2ff3e1f84 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -56,8 +56,8 @@
#define WL1271_BOOT_RETRIES 3
static char *fwlog_param;
-static bool bug_on_recovery;
-static bool no_recovery;
+static int bug_on_recovery = -1;
+static int no_recovery = -1;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
struct ieee80211_vif *vif,
@@ -79,12 +79,10 @@ static int wl12xx_set_authorized(struct wl1271 *wl,
if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
return 0;
- ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
+ ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
if (ret < 0)
return ret;
- wl12xx_croc(wl, wlvif->role_id);
-
wl1271_info("Association completed.");
return 0;
}
@@ -95,6 +93,8 @@ static void wl1271_reg_notify(struct wiphy *wiphy,
struct ieee80211_supported_band *band;
struct ieee80211_channel *ch;
int i;
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct wl1271 *wl = hw->priv;
band = wiphy->bands[IEEE80211_BAND_5GHZ];
for (i = 0; i < band->n_channels; i++) {
@@ -107,6 +107,9 @@ static void wl1271_reg_notify(struct wiphy *wiphy,
IEEE80211_CHAN_PASSIVE_SCAN;
}
+
+ if (likely(wl->state == WLCORE_STATE_ON))
+ wlcore_regdomain_config(wl);
}
static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
@@ -301,6 +304,7 @@ out:
static void wlcore_adjust_conf(struct wl1271 *wl)
{
/* Adjust settings according to optional module parameters */
+
if (fwlog_param) {
if (!strcmp(fwlog_param, "continuous")) {
wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
@@ -316,16 +320,22 @@ static void wlcore_adjust_conf(struct wl1271 *wl)
wl1271_error("Unknown fwlog parameter %s", fwlog_param);
}
}
+
+ if (bug_on_recovery != -1)
+ wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
+
+ if (no_recovery != -1)
+ wl->conf.recovery.no_recovery = (u8) no_recovery;
}
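The module parameters switch from bool to int with a default of -1 so that "not given on the command line" is distinguishable from an explicit 0 or 1; only an explicit value overrides the per-chip defaults now carried in conf.recovery. The sentinel pattern in isolation (the conf field name follows conf.h above; the rest is a sketch):

#include <stdio.h>

/* -1 means "module parameter not set", mirroring the hunk above */
static int no_recovery_param = -1;

struct recovery_conf {
	unsigned char no_recovery;	/* per-chip default from the conf */
};

static void adjust_conf(struct recovery_conf *conf)
{
	/* only an explicit 0 or 1 on the command line overrides the conf */
	if (no_recovery_param != -1)
		conf->no_recovery = (unsigned char)no_recovery_param;
}

int main(void)
{
	struct recovery_conf conf = { .no_recovery = 0 };

	adjust_conf(&conf);
	printf("no_recovery = %u\n", conf.no_recovery);
	return 0;
}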
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 hlid, u8 tx_pkts)
{
- bool fw_ps, single_sta;
+ bool fw_ps, single_link;
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
- single_sta = (wl->active_sta_count == 1);
+ single_link = (wl->active_link_count == 1);
/*
* Wake up from high level PS if the STA is asleep with too little
@@ -336,10 +346,10 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
/*
* Start high-level PS if the STA is asleep with enough blocks in FW.
- * Make an exception if this is the only connected station. In this
- * case FW-memory congestion is not a problem.
+ * Make an exception if this is the only connected link. In this
+ * case FW-memory congestion is less of a problem.
*/
- else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
+ else if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
@@ -347,11 +357,8 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct wl_fw_status_2 *status)
{
- struct wl1271_link *lnk;
u32 cur_fw_ps_map;
- u8 hlid, cnt;
-
- /* TODO: also use link_fast_bitmap here */
+ u8 hlid;
cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
if (wl->ap_fw_ps_map != cur_fw_ps_map) {
@@ -363,17 +370,9 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
wl->ap_fw_ps_map = cur_fw_ps_map;
}
- for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
- lnk = &wl->links[hlid];
- cnt = status->counters.tx_lnk_free_pkts[hlid] -
- lnk->prev_freed_pkts;
-
- lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
- lnk->allocated_pkts -= cnt;
-
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
- lnk->allocated_pkts);
- }
+ wl->links[hlid].allocated_pkts);
}
static int wlcore_fw_status(struct wl1271 *wl,
@@ -387,6 +386,7 @@ static int wlcore_fw_status(struct wl1271 *wl,
int i;
size_t status_len;
int ret;
+ struct wl1271_link *lnk;
status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
sizeof(*status_2) + wl->fw_status_priv_len;
@@ -412,6 +412,17 @@ static int wlcore_fw_status(struct wl1271 *wl,
wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
}
+
+ for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
+ lnk = &wl->links[i];
+ /* prevent wrap-around in freed-packets counter */
+ lnk->allocated_pkts -=
+ (status_2->counters.tx_lnk_free_pkts[i] -
+ lnk->prev_freed_pkts) & 0xff;
+
+ lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
+ }
+
/* prevent wrap-around in total blocks counter */
if (likely(wl->tx_blocks_freed <=
le32_to_cpu(status_2->total_released_blks)))
@@ -464,6 +475,8 @@ static int wlcore_fw_status(struct wl1271 *wl,
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
(s64)le32_to_cpu(status_2->fw_localtime);
+ wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
+
return 0;
}
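Per-link packet accounting moves from the AP-only loop into wlcore_fw_status() and now covers every allocated link; since the firmware's tx_lnk_free_pkts counters are only 8 bits wide, the delta is taken modulo 256 with the "& 0xff" mask before being subtracted. A standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* the fw reports tx_lnk_free_pkts as a u8, so it wraps at 256 */
	uint8_t prev_freed = 250;
	uint8_t now_freed  = 4;		/* wrapped: 10 packets were freed */
	unsigned int allocated = 12;
	unsigned int delta;

	/* same masking as the hunk above: (new - prev) & 0xff */
	delta = (now_freed - prev_freed) & 0xff;
	allocated -= delta;

	printf("freed since last fw status: %u, still allocated: %u\n",
	       delta, allocated);
	return 0;
}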
@@ -800,11 +813,13 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
/*
* Make sure the chip is awake and the logger isn't active.
- * Do not send a stop fwlog command if the fw is hanged.
+ * Do not send a stop fwlog command if the fw is hanged or if
+ * dbgpins are used (due to some fw bug).
*/
if (wl1271_ps_elp_wakeup(wl))
goto out;
- if (!wl->watchdog_recovery)
+ if (!wl->watchdog_recovery &&
+ wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
wl12xx_cmd_stop_fwlog(wl);
/* Read the first memory block address */
@@ -872,7 +887,8 @@ static void wlcore_print_recovery(struct wl1271 *wl)
if (ret < 0)
return;
- wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
+ wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
+ pc, hint_sts, ++wl->recovery_count);
wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
}
@@ -895,10 +911,10 @@ static void wl1271_recovery_work(struct work_struct *work)
wlcore_print_recovery(wl);
}
- BUG_ON(bug_on_recovery &&
+ BUG_ON(wl->conf.recovery.bug_on_recovery &&
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
- if (no_recovery) {
+ if (wl->conf.recovery.no_recovery) {
wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
goto out_unlock;
}
@@ -918,11 +934,6 @@ static void wl1271_recovery_work(struct work_struct *work)
/* Prevent spurious TX during FW restart */
wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
- if (wl->sched_scanning) {
- ieee80211_sched_scan_stopped(wl->hw);
- wl->sched_scanning = false;
- }
-
/* reboot the chipset */
while (!list_empty(&wl->wlvif_list)) {
wlvif = list_first_entry(&wl->wlvif_list,
@@ -1139,7 +1150,6 @@ int wl1271_plt_stop(struct wl1271 *wl)
cancel_work_sync(&wl->recovery_work);
cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
- cancel_delayed_work_sync(&wl->connection_loss_work);
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
@@ -1167,9 +1177,13 @@ static void wl1271_op_tx(struct ieee80211_hw *hw,
int q, mapping;
u8 hlid;
- if (vif)
- wlvif = wl12xx_vif_to_data(vif);
+ if (!vif) {
+ wl1271_debug(DEBUG_TX, "DROP skb with no vif");
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ wlvif = wl12xx_vif_to_data(vif);
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
@@ -1183,9 +1197,9 @@ static void wl1271_op_tx(struct ieee80211_hw *hw,
* allow these packets through.
*/
if (hlid == WL12XX_INVALID_LINK_ID ||
- (wlvif && !test_bit(hlid, wlvif->links_map)) ||
- (wlcore_is_queue_stopped(wl, q) &&
- !wlcore_is_queue_stopped_by_reason(wl, q,
+ (!test_bit(hlid, wlvif->links_map)) ||
+ (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
+ !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
ieee80211_free_txskb(hw, skb);
@@ -1197,16 +1211,17 @@ static void wl1271_op_tx(struct ieee80211_hw *hw,
skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
wl->tx_queue_count[q]++;
+ wlvif->tx_queue_count[q]++;
/*
* The workqueue is slow to process the tx_queue and we need stop
* the queue here, otherwise the queue will get too long.
*/
- if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
- !wlcore_is_queue_stopped_by_reason(wl, q,
+ if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
+ !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
- wlcore_stop_queue_locked(wl, q,
+ wlcore_stop_queue_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
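op_tx now drops packets for a NULL vif outright and accounts queued frames per vif as well as globally; the high-watermark stop decision moves to the per-vif counter and the per-vif queue-stop helpers, so one congested interface no longer throttles every other interface sharing the hardware. A sketch of that per-vif decision (the watermark value and struct below are stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define HIGH_WATERMARK 256	/* stand-in for the wlcore watermark */

/* hypothetical per-vif accounting, mirroring the hunk above: the stop
 * decision is taken on the per-vif counter, not the global one */
struct vif_queues { unsigned int tx_queue_count[4]; };

static bool should_stop_queue(const struct vif_queues *vif, int q)
{
	return vif->tx_queue_count[q] >= HIGH_WATERMARK;
}

int main(void)
{
	struct vif_queues busy = { .tx_queue_count = { [2] = 256 } };
	struct vif_queues idle = { { 0 } };

	printf("busy vif q2 stopped: %d\n", should_stop_queue(&busy, 2));
	printf("idle vif q2 stopped: %d\n", should_stop_queue(&idle, 2));
	return 0;
}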
@@ -1841,11 +1856,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
- cancel_delayed_work_sync(&wl->connection_loss_work);
/* let's notify MAC80211 about the remaining pending TX frames */
- wl12xx_tx_reset(wl);
mutex_lock(&wl->mutex);
+ wl12xx_tx_reset(wl);
wl1271_power_off(wl);
/*
@@ -1868,14 +1882,17 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
wl->time_offset = 0;
wl->ap_fw_ps_map = 0;
wl->ap_ps_map = 0;
- wl->sched_scanning = false;
wl->sleep_auth = WL1271_PSM_ILLEGAL;
memset(wl->roles_map, 0, sizeof(wl->roles_map));
memset(wl->links_map, 0, sizeof(wl->links_map));
memset(wl->roc_map, 0, sizeof(wl->roc_map));
+ memset(wl->session_ids, 0, sizeof(wl->session_ids));
wl->active_sta_count = 0;
+ wl->active_link_count = 0;
/* The system link is always allocated */
+ wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
+ wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
/*
@@ -1901,6 +1918,12 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
wl->tx_res_if = NULL;
kfree(wl->target_mem_map);
wl->target_mem_map = NULL;
+
+ /*
+ * FW channels must be re-calibrated after recovery,
+ * clear the last Reg-Domain channel configuration.
+ */
+ memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
static void wlcore_op_stop(struct ieee80211_hw *hw)
@@ -1916,6 +1939,71 @@ static void wlcore_op_stop(struct ieee80211_hw *hw)
mutex_unlock(&wl->mutex);
}
+static void wlcore_channel_switch_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
+ int ret;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
+ wl = wlvif->wl;
+
+ wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ /* check the channel switch is still ongoing */
+ if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
+ goto out;
+
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_chswitch_done(vif, false);
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl12xx_cmd_stop_channel_switch(wl, wlvif);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static void wlcore_connection_loss_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
+ wl = wlvif->wl;
+
+ wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ /* Call mac80211 connection loss */
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ goto out;
+
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_connection_loss(vif);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
u8 policy = find_first_zero_bit(wl->rate_policies_map,
@@ -2035,15 +2123,15 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
wl12xx_allocate_rate_policy(wl,
&wlvif->ap.ucast_rate_idx[i]);
- wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
+ wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
/*
* TODO: check if basic_rate shouldn't be
* wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
* instead (the same thing for STA above).
*/
- wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
+ wlvif->basic_rate = CONF_TX_ENABLED_RATES;
/* TODO: this seems to be used only for STA, check it */
- wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
+ wlvif->rate_set = CONF_TX_ENABLED_RATES;
}
wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
@@ -2063,6 +2151,10 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wl1271_rx_streaming_enable_work);
INIT_WORK(&wlvif->rx_streaming_disable_work,
wl1271_rx_streaming_disable_work);
+ INIT_DELAYED_WORK(&wlvif->channel_switch_work,
+ wlcore_channel_switch_work);
+ INIT_DELAYED_WORK(&wlvif->connection_loss_work,
+ wlcore_connection_loss_work);
INIT_LIST_HEAD(&wlvif->list);
setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -2070,7 +2162,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
return 0;
}
-static bool wl12xx_init_fw(struct wl1271 *wl)
+static int wl12xx_init_fw(struct wl1271 *wl)
{
int retries = WL1271_BOOT_RETRIES;
bool booted = false;
@@ -2136,7 +2228,7 @@ power_off:
wl->state = WLCORE_STATE_ON;
out:
- return booted;
+ return ret;
}
static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
@@ -2196,6 +2288,81 @@ static void wl12xx_force_active_psm(struct wl1271 *wl)
}
}
+struct wlcore_hw_queue_iter_data {
+ unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
+ /* current vif */
+ struct ieee80211_vif *vif;
+ /* is the current vif among those iterated */
+ bool cur_running;
+};
+
+static void wlcore_hw_queue_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct wlcore_hw_queue_iter_data *iter_data = data;
+
+ if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
+ return;
+
+ if (iter_data->cur_running || vif == iter_data->vif) {
+ iter_data->cur_running = true;
+ return;
+ }
+
+ __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
+}
+
+static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
+{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct wlcore_hw_queue_iter_data iter_data = {};
+ int i, q_base;
+
+ iter_data.vif = vif;
+
+ /* mark all bits taken by active interfaces */
+ ieee80211_iterate_active_interfaces_atomic(wl->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ wlcore_hw_queue_iter, &iter_data);
+
+ /* the current vif is already running in mac80211 (resume/recovery) */
+ if (iter_data.cur_running) {
+ wlvif->hw_queue_base = vif->hw_queue[0];
+ wl1271_debug(DEBUG_MAC80211,
+ "using pre-allocated hw queue base %d",
+ wlvif->hw_queue_base);
+
+ /* the interface type might have changed */
+ goto adjust_cab_queue;
+ }
+
+ q_base = find_first_zero_bit(iter_data.hw_queue_map,
+ WLCORE_NUM_MAC_ADDRESSES);
+ if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
+ return -EBUSY;
+
+ wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
+ wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
+ wlvif->hw_queue_base);
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
+ /* register hw queues in mac80211 */
+ vif->hw_queue[i] = wlvif->hw_queue_base + i;
+ }
+
+adjust_cab_queue:
+ /* the last places are reserved for cab queues per interface */
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
+ wlvif->hw_queue_base / NUM_TX_QUEUES;
+ else
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+
+ return 0;
+}
+
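wlcore_allocate_hw_queue_base() gives every interface its own block of NUM_TX_QUEUES mac80211 hw queues: a bitmap of bases already claimed by running interfaces is built, the first free base is taken, and AP interfaces additionally get a cab (content-after-beacon) queue from the range reserved past all per-vif blocks. The resulting numbering, assuming NUM_TX_QUEUES is 4 and WLCORE_NUM_MAC_ADDRESSES is 3 (both constants are defined elsewhere in wlcore and are only assumed here):

#include <stdio.h>

#define NUM_TX_QUEUES		4	/* assumed, as in wlcore */
#define NUM_MAC_ADDRESSES	3	/* assumed */

int main(void)
{
	int base_idx, i;

	for (base_idx = 0; base_idx < NUM_MAC_ADDRESSES; base_idx++) {
		int hw_queue_base = base_idx * NUM_TX_QUEUES;
		/* same formula as the adjust_cab_queue label above */
		int cab_queue = NUM_TX_QUEUES * NUM_MAC_ADDRESSES +
				hw_queue_base / NUM_TX_QUEUES;

		printf("vif %d: data queues ", base_idx);
		for (i = 0; i < NUM_TX_QUEUES; i++)
			printf("%d ", hw_queue_base + i);
		printf("cab queue %d\n", cab_queue);
	}
	return 0;
}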
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -2204,7 +2371,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
struct vif_counter_data vif_count;
int ret = 0;
u8 role_type;
- bool booted = false;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -2242,6 +2408,10 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
+ ret = wlcore_allocate_hw_queue_base(wl, wlvif);
+ if (ret < 0)
+ goto out;
+
if (wl12xx_need_fw_change(wl, vif_count, true)) {
wl12xx_force_active_psm(wl);
set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
@@ -2261,11 +2431,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
*/
memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
- booted = wl12xx_init_fw(wl);
- if (!booted) {
- ret = -EINVAL;
+ ret = wl12xx_init_fw(wl);
+ if (ret < 0)
goto out;
- }
}
ret = wl12xx_cmd_role_enable(wl, vif->addr,
@@ -2312,7 +2480,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
wl1271_info("down");
if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
- wl->scan_vif == vif) {
+ wl->scan_wlvif == wlvif) {
/*
* Rearm the tx watchdog just before idling scan. This
* prevents just-finished scans from triggering the watchdog
@@ -2321,11 +2489,21 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
- wl->scan_vif = NULL;
+ wl->scan_wlvif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
}
+ if (wl->sched_vif == wlvif) {
+ ieee80211_sched_scan_stopped(wl->hw);
+ wl->sched_vif = NULL;
+ }
+
+ if (wl->roc_vif == vif) {
+ wl->roc_vif = NULL;
+ ieee80211_remain_on_channel_expired(wl->hw);
+ }
+
if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
/* disable active roles */
ret = wl1271_ps_elp_wakeup(wl);
@@ -2394,9 +2572,6 @@ deinit:
/* Configure for power according to debugfs */
if (sta_auth != WL1271_PSM_ILLEGAL)
wl1271_acx_sleep_auth(wl, sta_auth);
- /* Configure for power always on */
- else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
- wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
/* Configure for ELP power saving */
else
wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
@@ -2408,6 +2583,7 @@ unlock:
del_timer_sync(&wlvif->rx_streaming_timer);
cancel_work_sync(&wlvif->rx_streaming_enable_work);
cancel_work_sync(&wlvif->rx_streaming_disable_work);
+ cancel_delayed_work_sync(&wlvif->connection_loss_work);
mutex_lock(&wl->mutex);
}
@@ -2466,8 +2642,7 @@ static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
return ret;
}
-static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- bool set_assoc)
+static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
@@ -2487,18 +2662,111 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
/* clear encryption type */
wlvif->encryption_type = KEY_NONE;
- if (set_assoc)
- set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
-
if (is_ibss)
ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
- else
+ else {
+ if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
+ /*
+ * TODO: this is an ugly workaround for a wl12xx fw
+ * bug - we are not able to tx/rx after the first
+ * start_sta, so make dummy start+stop calls,
+ * and then call start_sta again.
+ * This should be fixed in the fw.
+ */
+ wl12xx_cmd_role_start_sta(wl, wlvif);
+ wl12xx_cmd_role_stop_sta(wl, wlvif);
+ }
+
ret = wl12xx_cmd_role_start_sta(wl, wlvif);
+ }
+
+ return ret;
+}
+
+static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
+ int offset)
+{
+ u8 ssid_len;
+ const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
+ skb->len - offset);
+
+ if (!ptr) {
+ wl1271_error("No SSID in IEs!");
+ return -ENOENT;
+ }
+
+ ssid_len = ptr[1];
+ if (ssid_len > IEEE80211_MAX_SSID_LEN) {
+ wl1271_error("SSID is too long!");
+ return -EINVAL;
+ }
+
+ wlvif->ssid_len = ssid_len;
+ memcpy(wlvif->ssid, ptr+2, ssid_len);
+ return 0;
+}
+
+static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct sk_buff *skb;
+ int ieoffset;
+
+ /* we currently only support setting the ssid from the ap probe req */
+ if (wlvif->bss_type != BSS_TYPE_STA_BSS)
+ return -EINVAL;
+
+ skb = ieee80211_ap_probereq_get(wl->hw, vif);
+ if (!skb)
+ return -EINVAL;
+
+ ieoffset = offsetof(struct ieee80211_mgmt,
+ u.probe_req.variable);
+ wl1271_ssid_set(wlvif, skb, ieoffset);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 sta_rate_set)
+{
+ int ieoffset;
+ int ret;
+
+ wlvif->aid = bss_conf->aid;
+ wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
+ wlvif->beacon_int = bss_conf->beacon_int;
+ wlvif->wmm_enabled = bss_conf->qos;
+
+ set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
+
+ /*
+ * with wl1271, we don't need to update the
+ * beacon_int and dtim_period, because the firmware
+ * updates it by itself when the first beacon is
+ * received after a join.
+ */
+ ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
if (ret < 0)
- goto out;
+ return ret;
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- goto out;
+ /*
+ * Get a template for hardware connection maintenance
+ */
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
+ wlvif,
+ NULL);
+ ieoffset = offsetof(struct ieee80211_mgmt,
+ u.probe_req.variable);
+ wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
+
+ /* enable the connection monitoring feature */
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
+ if (ret < 0)
+ return ret;
/*
* The join command disable the keep-alive mode, shut down its process,
@@ -2508,35 +2776,83 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
*/
ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
if (ret < 0)
- goto out;
+ return ret;
ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
if (ret < 0)
- goto out;
+ return ret;
ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
if (ret < 0)
- goto out;
+ return ret;
ret = wl1271_acx_keep_alive_config(wl, wlvif,
wlvif->sta.klv_template_id,
ACX_KEEP_ALIVE_TPL_VALID);
if (ret < 0)
- goto out;
+ return ret;
+
+ /*
+ * The default fw psm configuration is AUTO, while mac80211 default
+ * setting is off (ACTIVE), so sync the fw with the correct value.
+ */
+ ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
+ if (ret < 0)
+ return ret;
+
+ if (sta_rate_set) {
+ wlvif->rate_set =
+ wl1271_tx_enabled_rates_get(wl,
+ sta_rate_set,
+ wlvif->band);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ }
-out:
return ret;
}
-static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
+ bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+
+ /* make sure we are connected (sta) joined */
+ if (sta &&
+ !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ return false;
+
+ /* make sure we are joined (ibss) */
+ if (!sta &&
+ test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
+ return false;
+
+ if (sta) {
+ /* use defaults when not associated */
+ wlvif->aid = 0;
+
+ /* free probe-request template */
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = NULL;
+
+ /* disable connection monitor features */
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
+
+ /* Disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
+ }
if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
- wl12xx_cmd_stop_channel_switch(wl);
+ wl12xx_cmd_stop_channel_switch(wl, wlvif);
ieee80211_chswitch_done(vif, false);
+ cancel_delayed_work(&wlvif->channel_switch_work);
}
/* invalidate keep-alive template */
@@ -2544,17 +2860,11 @@ static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wlvif->sta.klv_template_id,
ACX_KEEP_ALIVE_TPL_INVALID);
- /* to stop listening to a channel, we disconnect */
- ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
- if (ret < 0)
- goto out;
-
/* reset TX security counters on a clean disconnect */
wlvif->tx_security_last_seq_lsb = 0;
wlvif->tx_security_seq = 0;
-out:
- return ret;
+ return 0;
}
static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
@@ -2563,147 +2873,10 @@ static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wlvif->rate_set = wlvif->basic_rate_set;
}
-static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- bool idle)
-{
- int ret;
- bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
-
- if (idle == cur_idle)
- return 0;
-
- if (idle) {
- /* no need to croc if we weren't busy (e.g. during boot) */
- if (wl12xx_dev_role_started(wlvif)) {
- ret = wl12xx_stop_dev(wl, wlvif);
- if (ret < 0)
- goto out;
- }
- wlvif->rate_set =
- wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- if (ret < 0)
- goto out;
- clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
- } else {
- /* The current firmware only supports sched_scan in idle */
- if (wl->sched_scanning) {
- wl1271_scan_sched_scan_stop(wl, wlvif);
- ieee80211_sched_scan_stopped(wl->hw);
- }
-
- ret = wl12xx_start_dev(wl, wlvif);
- if (ret < 0)
- goto out;
- set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
- }
-
-out:
- return ret;
-}
-
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_conf *conf, u32 changed)
{
- bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
- int channel, ret;
-
- channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
-
- /* if the channel changes while joined, join again */
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
- ((wlvif->band != conf->channel->band) ||
- (wlvif->channel != channel) ||
- (wlvif->channel_type != conf->channel_type))) {
- /* send all pending packets */
- ret = wlcore_tx_work_locked(wl);
- if (ret < 0)
- return ret;
-
- wlvif->band = conf->channel->band;
- wlvif->channel = channel;
- wlvif->channel_type = conf->channel_type;
-
- if (is_ap) {
- wl1271_set_band_rate(wl, wlvif);
- ret = wl1271_init_ap_rates(wl, wlvif);
- if (ret < 0)
- wl1271_error("AP rate policy change failed %d",
- ret);
- } else {
- /*
- * FIXME: the mac80211 should really provide a fixed
- * rate to use here. for now, just use the smallest
- * possible rate for the band as a fixed rate for
- * association frames and other control messages.
- */
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- wl1271_set_band_rate(wl, wlvif);
-
- wlvif->basic_rate =
- wl1271_tx_min_rate_get(wl,
- wlvif->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- if (ret < 0)
- wl1271_warning("rate policy for channel "
- "failed %d", ret);
-
- /*
- * change the ROC channel. do it only if we are
- * not idle. otherwise, CROC will be called
- * anyway.
- */
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
- &wlvif->flags) &&
- wl12xx_dev_role_started(wlvif) &&
- !(conf->flags & IEEE80211_CONF_IDLE)) {
- ret = wl12xx_stop_dev(wl, wlvif);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_start_dev(wl, wlvif);
- if (ret < 0)
- return ret;
- }
- }
- }
-
- if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
-
- if ((conf->flags & IEEE80211_CONF_PS) &&
- test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
- !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
-
- int ps_mode;
- char *ps_mode_str;
-
- if (wl->conf.conn.forced_ps) {
- ps_mode = STATION_POWER_SAVE_MODE;
- ps_mode_str = "forced";
- } else {
- ps_mode = STATION_AUTO_PS_MODE;
- ps_mode_str = "auto";
- }
-
- wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
-
- ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
-
- if (ret < 0)
- wl1271_warning("enter %s ps failed %d",
- ps_mode_str, ret);
-
- } else if (!(conf->flags & IEEE80211_CONF_PS) &&
- test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
-
- wl1271_debug(DEBUG_PSM, "auto ps disabled");
-
- ret = wl1271_ps_set_mode(wl, wlvif,
- STATION_ACTIVE_MODE);
- if (ret < 0)
- wl1271_warning("exit auto ps failed %d", ret);
- }
- }
+ int ret;
if (conf->power_level != wlvif->power_level) {
ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
@@ -2721,37 +2894,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
struct ieee80211_conf *conf = &hw->conf;
- int channel, ret = 0;
-
- channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+ int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
" changed 0x%x",
- channel,
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
conf->power_level,
conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
changed);
- /*
- * mac80211 will go to idle nearly immediately after transmitting some
- * frames, such as the deauth. To make sure those frames reach the air,
- * wait here until the TX queue is fully flushed.
- */
- if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
- ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (conf->flags & IEEE80211_CONF_IDLE)))
- wl1271_tx_flush(wl);
-
mutex_lock(&wl->mutex);
- /* we support configuring the channel and band even while off */
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- wl->band = conf->channel->band;
- wl->channel = channel;
- wl->channel_type = conf->channel_type;
- }
-
if (changed & IEEE80211_CONF_CHANGE_POWER)
wl->power_level = conf->power_level;
@@ -3071,10 +3224,7 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
* stop the queues and flush to ensure the next packets are
* in sync with FW spare block accounting
*/
- mutex_lock(&wl->mutex);
wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
- mutex_unlock(&wl->mutex);
-
wl1271_tx_flush(wl);
}
@@ -3200,6 +3350,29 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
+void wlcore_regdomain_config(struct wl1271 *wl)
+{
+ int ret;
+
+ if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
+ return;
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_cmd_regdomain_config_locked(wl);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto out;
+ }
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
@@ -3239,7 +3412,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out_sleep;
}
- ret = wl1271_scan(hw->priv, vif, ssid, len, req);
+ ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
wl1271_ps_elp_sleep(wl);
out:
@@ -3252,6 +3425,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
@@ -3269,7 +3443,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
goto out;
if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
- ret = wl1271_scan_stop(wl);
+ ret = wl->ops->scan_stop(wl, wlvif);
if (ret < 0)
goto out_sleep;
}
@@ -3282,7 +3456,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
- wl->scan_vif = NULL;
+ wl->scan_wlvif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
@@ -3316,15 +3490,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
+ ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
if (ret < 0)
goto out_sleep;
- ret = wl1271_scan_sched_scan_start(wl, wlvif);
- if (ret < 0)
- goto out_sleep;
-
- wl->sched_scanning = true;
+ wl->sched_vif = wlvif;
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -3351,7 +3521,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- wl1271_scan_sched_scan_stop(wl, wlvif);
+ wl->ops->sched_scan_stop(wl, wlvif);
wl1271_ps_elp_sleep(wl);
out:
@@ -3416,30 +3586,6 @@ out:
return ret;
}
-static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
- int offset)
-{
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- u8 ssid_len;
- const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
- skb->len - offset);
-
- if (!ptr) {
- wl1271_error("No SSID in IEs!");
- return -ENOENT;
- }
-
- ssid_len = ptr[1];
- if (ssid_len > IEEE80211_MAX_SSID_LEN) {
- wl1271_error("SSID is too long!");
- return -EINVAL;
- }
-
- wlvif->ssid_len = ssid_len;
- memcpy(wlvif->ssid, ptr+2, ssid_len);
- return 0;
-}
-
static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
{
int len;
@@ -3620,7 +3766,7 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
wl1271_debug(DEBUG_MASTER, "beacon updated");
- ret = wl1271_ssid_set(vif, beacon, ieoffset);
+ ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
if (ret < 0) {
dev_kfree_skb(beacon);
goto out;
@@ -3637,6 +3783,12 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
goto out;
}
+ wlvif->wmm_enabled =
+ cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WMM,
+ beacon->data + ieoffset,
+ beacon->len - ieoffset);
+
/*
* In case we already have a probe-resp beacon set explicitly
* by usermode, don't use the beacon data.
@@ -3690,7 +3842,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret = 0;
- if ((changed & BSS_CHANGED_BEACON_INT)) {
+ if (changed & BSS_CHANGED_BEACON_INT) {
wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
@@ -3703,7 +3855,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
}
- if ((changed & BSS_CHANGED_BEACON)) {
+ if (changed & BSS_CHANGED_BEACON) {
ret = wlcore_set_beacon_template(wl, vif, is_ap);
if (ret < 0)
goto out;
@@ -3724,7 +3876,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
- if ((changed & BSS_CHANGED_BASIC_RATES)) {
+ if (changed & BSS_CHANGED_BASIC_RATES) {
u32 rates = bss_conf->basic_rates;
wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
@@ -3755,7 +3907,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
if (ret < 0)
goto out;
- if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (bss_conf->enable_beacon) {
if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
ret = wl12xx_cmd_role_start_ap(wl, wlvif);
@@ -3802,6 +3954,79 @@ out:
return;
}
+static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 sta_rate_set)
+{
+ u32 rates;
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211,
+ "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
+ bss_conf->bssid, bss_conf->aid,
+ bss_conf->beacon_int,
+ bss_conf->basic_rates, sta_rate_set);
+
+ wlvif->beacon_int = bss_conf->beacon_int;
+ rates = bss_conf->basic_rates;
+ wlvif->basic_rate_set =
+ wl1271_tx_enabled_rates_get(wl, rates,
+ wlvif->band);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
+
+ if (sta_rate_set)
+ wlvif->rate_set =
+ wl1271_tx_enabled_rates_get(wl,
+ sta_rate_set,
+ wlvif->band);
+
+ /* we only support sched_scan while not connected */
+ if (wl->sched_vif == wlvif)
+ wl->ops->sched_scan_stop(wl, wlvif);
+
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_build_null_data(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
+ if (ret < 0)
+ return ret;
+
+ wlcore_set_ssid(wl, wlvif);
+
+ set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+
+ return 0;
+}
+
+static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
+
+ /* revert back to minimum rates for the current band */
+ wl1271_set_band_rate(wl, wlvif);
+ wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
+ ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ }
+
+ clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+ return 0;
+}
/* STA/IBSS mode changes */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
struct ieee80211_vif *vif,
@@ -3809,7 +4034,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
u32 changed)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- bool do_join = false, set_assoc = false;
+ bool do_join = false;
bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
bool ibss_joined = false;
u32 sta_rate_set = 0;
@@ -3830,9 +4055,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
ibss_joined = true;
} else {
- if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
- &wlvif->flags))
- wl1271_unjoin(wl, wlvif);
+ wlcore_unset_assoc(wl, wlvif);
+ wl12xx_cmd_role_stop_sta(wl, wlvif);
}
}
@@ -3850,13 +4074,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
do_join = true;
}
- if (changed & BSS_CHANGED_IDLE && !is_ibss) {
- ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
- if (ret < 0)
- wl1271_warning("idle mode change failed %d", ret);
- }
-
- if ((changed & BSS_CHANGED_CQM)) {
+ if (changed & BSS_CHANGED_CQM) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
@@ -3868,150 +4086,39 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
}
- if (changed & BSS_CHANGED_BSSID)
- if (!is_zero_ether_addr(bss_conf->bssid)) {
- ret = wl12xx_cmd_build_null_data(wl, wlvif);
- if (ret < 0)
- goto out;
-
- ret = wl1271_build_qos_null_data(wl, vif);
- if (ret < 0)
- goto out;
- }
-
- if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
+ if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
+ BSS_CHANGED_ASSOC)) {
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
- if (!sta)
- goto sta_not_found;
-
- /* save the supp_rates of the ap */
- sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
- if (sta->ht_cap.ht_supported)
- sta_rate_set |=
- (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
- (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
- sta_ht_cap = sta->ht_cap;
- sta_exists = true;
-
-sta_not_found:
+ if (sta) {
+ u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
+
+ /* save the supp_rates of the ap */
+ sta_rate_set = sta->supp_rates[wlvif->band];
+ if (sta->ht_cap.ht_supported)
+ sta_rate_set |=
+ (rx_mask[0] << HW_HT_RATES_OFFSET) |
+ (rx_mask[1] << HW_MIMO_RATES_OFFSET);
+ sta_ht_cap = sta->ht_cap;
+ sta_exists = true;
+ }
+
rcu_read_unlock();
}
- if ((changed & BSS_CHANGED_ASSOC)) {
- if (bss_conf->assoc) {
- u32 rates;
- int ieoffset;
- wlvif->aid = bss_conf->aid;
- wlvif->channel_type =
- cfg80211_get_chandef_type(&bss_conf->chandef);
- wlvif->beacon_int = bss_conf->beacon_int;
- do_join = true;
- set_assoc = true;
-
- /*
- * use basic rates from AP, and determine lowest rate
- * to use with control frames.
- */
- rates = bss_conf->basic_rates;
- wlvif->basic_rate_set =
- wl1271_tx_enabled_rates_get(wl, rates,
- wlvif->band);
- wlvif->basic_rate =
- wl1271_tx_min_rate_get(wl,
- wlvif->basic_rate_set);
- if (sta_rate_set)
- wlvif->rate_set =
- wl1271_tx_enabled_rates_get(wl,
- sta_rate_set,
- wlvif->band);
- ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- if (ret < 0)
- goto out;
-
- /*
- * with wl1271, we don't need to update the
- * beacon_int and dtim_period, because the firmware
- * updates it by itself when the first beacon is
- * received after a join.
- */
- ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
+ if (changed & BSS_CHANGED_BSSID) {
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ ret = wlcore_set_bssid(wl, wlvif, bss_conf,
+ sta_rate_set);
if (ret < 0)
goto out;
- /*
- * Get a template for hardware connection maintenance
- */
- dev_kfree_skb(wlvif->probereq);
- wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
- wlvif,
- NULL);
- ieoffset = offsetof(struct ieee80211_mgmt,
- u.probe_req.variable);
- wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
-
- /* enable the connection monitoring feature */
- ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
- if (ret < 0)
- goto out;
+ /* Need to update the BSSID (for filtering, etc.) */
+ do_join = true;
} else {
- /* use defaults when not associated */
- bool was_assoc =
- !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
- &wlvif->flags);
- bool was_ifup =
- !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
- &wlvif->flags);
- wlvif->aid = 0;
-
- /* free probe-request template */
- dev_kfree_skb(wlvif->probereq);
- wlvif->probereq = NULL;
-
- /* revert back to minimum rates for the current band */
- wl1271_set_band_rate(wl, wlvif);
- wlvif->basic_rate =
- wl1271_tx_min_rate_get(wl,
- wlvif->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- if (ret < 0)
- goto out;
-
- /* disable connection monitor features */
- ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
-
- /* Disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
+ ret = wlcore_clear_bssid(wl, wlvif);
if (ret < 0)
goto out;
-
- /* restore the bssid filter and go to dummy bssid */
- if (was_assoc) {
- /*
- * we might have to disable roc, if there was
- * no IF_OPER_UP notification.
- */
- if (!was_ifup) {
- ret = wl12xx_croc(wl, wlvif->role_id);
- if (ret < 0)
- goto out;
- }
- /*
- * (we also need to disable roc in case of
- * roaming on the same channel. until we will
- * have a better flow...)
- */
- if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
- ret = wl12xx_croc(wl,
- wlvif->dev_role_id);
- if (ret < 0)
- goto out;
- }
-
- wl1271_unjoin(wl, wlvif);
- if (!bss_conf->idle)
- wl12xx_start_dev(wl, wlvif);
- }
}
}
@@ -4041,71 +4148,87 @@ sta_not_found:
goto out;
if (do_join) {
- ret = wl1271_join(wl, wlvif, set_assoc);
+ ret = wlcore_join(wl, wlvif);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
goto out;
}
+ }
- /* ROC until connected (after EAPOL exchange) */
- if (!is_ibss) {
- ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ ret = wlcore_set_assoc(wl, wlvif, bss_conf,
+ sta_rate_set);
if (ret < 0)
goto out;
if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
wl12xx_set_authorized(wl, wlvif);
+ } else {
+ wlcore_unset_assoc(wl, wlvif);
}
- /*
- * stop device role if started (we might already be in
- * STA/IBSS role).
- */
- if (wl12xx_dev_role_started(wlvif)) {
- ret = wl12xx_stop_dev(wl, wlvif);
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ if ((bss_conf->ps) &&
+ test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
+ !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
+ int ps_mode;
+ char *ps_mode_str;
+
+ if (wl->conf.conn.forced_ps) {
+ ps_mode = STATION_POWER_SAVE_MODE;
+ ps_mode_str = "forced";
+ } else {
+ ps_mode = STATION_AUTO_PS_MODE;
+ ps_mode_str = "auto";
+ }
+
+ wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
+
+ ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
if (ret < 0)
- goto out;
+ wl1271_warning("enter %s ps failed %d",
+ ps_mode_str, ret);
+ } else if (!bss_conf->ps &&
+ test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
+ wl1271_debug(DEBUG_PSM, "auto ps disabled");
+
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_ACTIVE_MODE);
+ if (ret < 0)
+ wl1271_warning("exit auto ps failed %d", ret);
}
}
/* Handle new association with HT. Do this after join. */
- if (sta_exists) {
- if ((changed & BSS_CHANGED_HT) &&
- (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
- ret = wl1271_acx_set_ht_capabilities(wl,
- &sta_ht_cap,
- true,
- wlvif->sta.hlid);
- if (ret < 0) {
- wl1271_warning("Set ht cap true failed %d",
- ret);
- goto out;
- }
+ if (sta_exists &&
+ (changed & BSS_CHANGED_HT)) {
+ bool enabled =
+ bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
+
+ ret = wlcore_hw_set_peer_cap(wl,
+ &sta_ht_cap,
+ enabled,
+ wlvif->rate_set,
+ wlvif->sta.hlid);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap failed %d", ret);
+ goto out;
+
}
- /* handle new association without HT and disassociation */
- else if (changed & BSS_CHANGED_ASSOC) {
- ret = wl1271_acx_set_ht_capabilities(wl,
- &sta_ht_cap,
- false,
- wlvif->sta.hlid);
+
+ if (enabled) {
+ ret = wl1271_acx_set_ht_information(wl, wlvif,
+ bss_conf->ht_operation_mode);
if (ret < 0) {
- wl1271_warning("Set ht cap false failed %d",
+ wl1271_warning("Set ht information failed %d",
ret);
goto out;
}
}
}
- /* Handle HT information change. Done after join. */
- if ((changed & BSS_CHANGED_HT) &&
- (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
- ret = wl1271_acx_set_ht_information(wl, wlvif,
- bss_conf->ht_operation_mode);
- if (ret < 0) {
- wl1271_warning("Set ht information failed %d", ret);
- goto out;
- }
- }
-
/* Handle arp filtering. Done after join. */
if ((changed & BSS_CHANGED_ARP_FILTER) ||
(!is_ibss && (changed & BSS_CHANGED_QOS))) {
@@ -4113,8 +4236,7 @@ sta_not_found:
wlvif->sta.qos = bss_conf->qos;
WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
- if (bss_conf->arp_addr_cnt == 1 &&
- bss_conf->arp_filter_enabled) {
+ if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
wlvif->ip_addr = addr;
/*
* The template should have been configured only upon
@@ -4155,15 +4277,15 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret;
- wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
- (int)changed);
+ wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
+ wlvif->role_id, (int)changed);
/*
* make sure to cancel pending disconnections if our association
* state changed
*/
if (!is_ap && (changed & BSS_CHANGED_ASSOC))
- cancel_delayed_work_sync(&wl->connection_loss_work);
+ cancel_delayed_work_sync(&wlvif->connection_loss_work);
if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
!bss_conf->enable_beacon)
@@ -4192,6 +4314,76 @@ out:
mutex_unlock(&wl->mutex);
}
+static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
+ ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+ cfg80211_get_chandef_type(&ctx->def));
+ return 0;
+}
+
+static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
+ ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+ cfg80211_get_chandef_type(&ctx->def));
+}
+
+static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ wl1271_debug(DEBUG_MAC80211,
+ "mac80211 change chanctx %d (type %d) changed 0x%x",
+ ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+ cfg80211_get_chandef_type(&ctx->def), changed);
+}
+
+static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int channel = ieee80211_frequency_to_channel(
+ ctx->def.chan->center_freq);
+
+ wl1271_debug(DEBUG_MAC80211,
+ "mac80211 assign chanctx (role %d) %d (type %d)",
+ wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
+
+ mutex_lock(&wl->mutex);
+
+ wlvif->band = ctx->def.chan->band;
+ wlvif->channel = channel;
+ wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
+
+ /* update default rates according to the band */
+ wl1271_set_band_rate(wl, wlvif);
+
+ mutex_unlock(&wl->mutex);
+
+ return 0;
+}
+
+static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
+ wl1271_debug(DEBUG_MAC80211,
+ "mac80211 unassign chanctx (role %d) %d (type %d)",
+ wlvif->role_id,
+ ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
+ cfg80211_get_chandef_type(&ctx->def));
+
+ wl1271_tx_flush(wl);
+}
+
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
@@ -4319,8 +4511,6 @@ void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
return;
clear_bit(hlid, wlvif->ap.sta_hlid_map);
- memset(wl->links[hlid].addr, 0, ETH_ALEN);
- wl->links[hlid].ba_bitmap = 0;
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
wl12xx_free_link(wl, wlvif, &hlid);
@@ -4380,6 +4570,45 @@ static int wl12xx_sta_remove(struct wl1271 *wl,
return ret;
}
+static void wlcore_roc_if_possible(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
+{
+ if (find_first_bit(wl->roc_map,
+ WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
+ return;
+
+ if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
+ return;
+
+ wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
+}
+
+static void wlcore_update_inconn_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct wl1271_station *wl_sta,
+ bool in_connection)
+{
+ if (in_connection) {
+ if (WARN_ON(wl_sta->in_connection))
+ return;
+ wl_sta->in_connection = true;
+ if (!wlvif->inconn_count++)
+ wlcore_roc_if_possible(wl, wlvif);
+ } else {
+ if (!wl_sta->in_connection)
+ return;
+
+ wl_sta->in_connection = false;
+ wlvif->inconn_count--;
+ if (WARN_ON(wlvif->inconn_count < 0))
+ return;
+
+ if (!wlvif->inconn_count)
+ if (test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
+ }
+}
+
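The two helpers above boil down to a per-vif reference count: the first station entering the connection phase takes a ROC on the role, and the last one to finish (or fail) drops it again. A minimal, self-contained sketch of the same first-in/last-out pattern; the names and types here are illustrative only, not wlcore API:

	#include <stdbool.h>

	struct conn_gate {
		int  inconn_count;   /* stations currently mid-connection */
		bool roc_held;       /* stands in for holding the ROC */
	};

	/* Callers must keep enter/leave calls balanced, as the driver
	 * does via wl_sta->in_connection. */
	static void conn_gate_update(struct conn_gate *g, bool entering)
	{
		if (entering) {
			if (!g->inconn_count++)
				g->roc_held = true;   /* first user: request ROC */
		} else if (g->inconn_count > 0) {
			if (!--g->inconn_count)
				g->roc_held = false;  /* last user: cancel ROC */
		}
	}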
static int wl12xx_update_sta_state(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta,
@@ -4398,8 +4627,13 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
/* Add station (AP mode) */
if (is_ap &&
old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE)
- return wl12xx_sta_add(wl, wlvif, sta);
+ new_state == IEEE80211_STA_NONE) {
+ ret = wl12xx_sta_add(wl, wlvif, sta);
+ if (ret)
+ return ret;
+
+ wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
+ }
/* Remove station (AP mode) */
if (is_ap &&
@@ -4407,35 +4641,59 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
new_state == IEEE80211_STA_NOTEXIST) {
/* must not fail */
wl12xx_sta_remove(wl, wlvif, sta);
- return 0;
+
+ wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
}
/* Authorize station (AP mode) */
if (is_ap &&
new_state == IEEE80211_STA_AUTHORIZED) {
- ret = wl12xx_cmd_set_peer_state(wl, hlid);
+ ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid);
if (ret < 0)
return ret;
ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
hlid);
- return ret;
+ if (ret)
+ return ret;
+
+ wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
}
/* Authorize station */
if (is_sta &&
new_state == IEEE80211_STA_AUTHORIZED) {
set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
- return wl12xx_set_authorized(wl, wlvif);
+ ret = wl12xx_set_authorized(wl, wlvif);
+ if (ret)
+ return ret;
}
if (is_sta &&
old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
- return 0;
+ clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
}
+ /* clear ROCs on failure or authorization */
+ if (is_sta &&
+ (new_state == IEEE80211_STA_AUTHORIZED ||
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ if (test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
+ }
+
+ if (is_sta &&
+ old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ if (find_first_bit(wl->roc_map,
+ WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
+ WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
+ wl12xx_roc(wl, wlvif, wlvif->role_id,
+ wlvif->band, wlvif->channel);
+ }
+ }
return 0;
}
@@ -4500,18 +4758,18 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
hlid = wlvif->sta.hlid;
- ba_bitmap = &wlvif->sta.ba_rx_bitmap;
} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
struct wl1271_station *wl_sta;
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
- ba_bitmap = &wl->links[hlid].ba_bitmap;
} else {
ret = -EINVAL;
goto out;
}
+ ba_bitmap = &wl->links[hlid].ba_bitmap;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -4665,12 +4923,23 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
/* TODO: change mac80211 to pass vif as param */
wl12xx_for_each_wlvif_sta(wl, wlvif) {
- ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
+ unsigned long delay_usec;
+
+ ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
+ if (ret)
+ goto out_sleep;
- if (!ret)
- set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+ set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+
+ /* indicate failure 5 seconds after channel switch time */
+ delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
+ ch_switch->count;
+ ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
+ usecs_to_jiffies(delay_usec) +
+ msecs_to_jiffies(5000));
}
+out_sleep:
wl1271_ps_elp_sleep(wl);
out:
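To put the channel-switch timeout above in concrete numbers (illustrative values, not taken from the patch): one TU is 1024 usec, so for a typical beacon interval of 100 TU, ieee80211_tu_to_usec(wlvif->beacon_int) is 102400 usec. With ch_switch->count == 5 the switch is due in about 512 ms, and channel_switch_work is therefore scheduled to fire roughly 5.5 s after being queued, that is, 5 seconds past the expected switch time, as the comment says.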
@@ -4684,6 +4953,144 @@ static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
wl1271_tx_flush(wl);
}
+static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl1271 *wl = hw->priv;
+ int channel, ret = 0;
+
+ channel = ieee80211_frequency_to_channel(chan->center_freq);
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
+ channel, wlvif->role_id);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ /* return EBUSY if we can't ROC right now */
+ if (WARN_ON(wl->roc_vif ||
+ find_first_bit(wl->roc_map,
+ WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
+ if (ret < 0)
+ goto out_sleep;
+
+ wl->roc_vif = vif;
+ ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
+ msecs_to_jiffies(duration));
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+static int __wlcore_roc_completed(struct wl1271 *wl)
+{
+ struct wl12xx_vif *wlvif;
+ int ret;
+
+ /* already completed */
+ if (unlikely(!wl->roc_vif))
+ return 0;
+
+ wlvif = wl12xx_vif_to_data(wl->roc_vif);
+
+ if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+ return -EBUSY;
+
+ ret = wl12xx_stop_dev(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ wl->roc_vif = NULL;
+
+ return 0;
+}
+
+static int wlcore_roc_completed(struct wl1271 *wl)
+{
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211, "roc complete");
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = __wlcore_roc_completed(wl);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
+static void wlcore_roc_complete_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ int ret;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wl = container_of(dwork, struct wl1271, roc_complete_work);
+
+ ret = wlcore_roc_completed(wl);
+ if (!ret)
+ ieee80211_remain_on_channel_expired(wl->hw);
+}
+
+static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct wl1271 *wl = hw->priv;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
+
+ /* TODO: per-vif */
+ wl1271_tx_flush(wl);
+
+ /*
+ * we can't just flush_work here, because it might deadlock
+ * (as we might get called from the same workqueue)
+ */
+ cancel_delayed_work_sync(&wl->roc_complete_work);
+ wlcore_roc_completed(wl);
+
+ return 0;
+}
+
+static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl1271 *wl = hw->priv;
+
+ wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
+}
+
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
@@ -4747,20 +5154,20 @@ static struct ieee80211_rate wl1271_rates[] = {
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
- { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
- { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
- { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
- { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
- { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
- { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
- { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
- { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
- { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
- { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
- { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
- { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
- { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
- { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
+ { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
/* can't be const, mac80211 writes to this */
@@ -4801,40 +5208,40 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
- { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
- { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
- { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
- { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
- { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
- { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
- { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
- { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
- { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
- { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
- { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
- { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
- { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
- { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
- { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
- { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
- { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
- { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
- { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
- { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
- { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
- { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
- { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
- { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
- { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
- { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
- { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
- { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
- { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
- { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
- { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
- { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
- { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
- { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
+ { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
+ { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
static struct ieee80211_supported_band wl1271_band_5ghz = {
@@ -4875,6 +5282,14 @@ static const struct ieee80211_ops wl1271_ops = {
.set_bitrate_mask = wl12xx_set_bitrate_mask,
.channel_switch = wl12xx_op_channel_switch,
.flush = wlcore_op_flush,
+ .remain_on_channel = wlcore_op_remain_on_channel,
+ .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
+ .add_chanctx = wlcore_op_add_chanctx,
+ .remove_chanctx = wlcore_op_remove_chanctx,
+ .change_chanctx = wlcore_op_change_chanctx,
+ .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
+ .sta_rc_update = wlcore_op_sta_rc_update,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
@@ -5044,34 +5459,6 @@ static struct bin_attribute fwlog_attr = {
.read = wl1271_sysfs_read_fwlog,
};
-static void wl1271_connection_loss_work(struct work_struct *work)
-{
- struct delayed_work *dwork;
- struct wl1271 *wl;
- struct ieee80211_vif *vif;
- struct wl12xx_vif *wlvif;
-
- dwork = container_of(work, struct delayed_work, work);
- wl = container_of(dwork, struct wl1271, connection_loss_work);
-
- wl1271_info("Connection loss work.");
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state != WLCORE_STATE_ON))
- goto out;
-
- /* Call mac80211 connection loss */
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- goto out;
- vif = wl12xx_wlvif_to_vif(wlvif);
- ieee80211_connection_loss(vif);
- }
-out:
- mutex_unlock(&wl->mutex);
-}
-
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
int i;
@@ -5117,7 +5504,7 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
ret = wl12xx_set_power_on(wl);
if (ret < 0)
- goto out;
+ return ret;
ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
if (ret < 0)
@@ -5207,10 +5594,9 @@ static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
},
};
-static const struct ieee80211_iface_combination
+static struct ieee80211_iface_combination
wlcore_iface_combinations[] = {
{
- .num_different_channels = 1,
.max_interfaces = 3,
.limits = wlcore_iface_limits,
.n_limits = ARRAY_SIZE(wlcore_iface_limits),
@@ -5219,6 +5605,7 @@ wlcore_iface_combinations[] = {
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
+ int i;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -5249,7 +5636,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
- IEEE80211_HW_SCAN_WHILE_IDLE;
+ IEEE80211_HW_QUEUE_CONTROL;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -5271,6 +5658,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
+ wl->hw->wiphy->max_remain_on_channel_duration = 5000;
+
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -5279,6 +5668,22 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
ARRAY_SIZE(wl1271_channels_5ghz) >
WL1271_MAX_CHANNELS);
/*
+ * clear channel flags from the previous usage
+ * and restore max_power & max_antenna_gain values.
+ */
+ for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
+ wl1271_band_2ghz.channels[i].flags = 0;
+ wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
+ wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
+ wl1271_band_5ghz.channels[i].flags = 0;
+ wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
+ wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
+ }
+
+ /*
* We keep local copies of the band structs because we need to
* modify them on a per-device basis.
*/
@@ -5298,7 +5703,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&wl->bands[IEEE80211_BAND_5GHZ];
- wl->hw->queues = 4;
+ /*
+ * allow 4 queues per mac address we support +
+ * 1 cab queue per mac + one global offchannel Tx queue
+ */
+ wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
+
+ /* the last queue is the offchannel queue */
+ wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
wl->hw->max_rates = 1;
wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
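For the queue sizing in the hunk above, a quick worked example (assuming the usual wlcore values NUM_TX_QUEUES == 4 and WLCORE_NUM_MAC_ADDRESSES == 3, which are not shown in this diff): wl->hw->queues = (4 + 1) * 3 + 1 = 16, so queues 0..14 carry the per-MAC data and CAB traffic, and queue 15 (wl->hw->queues - 1) is the single global off-channel Tx queue assigned to offchannel_tx_hw_queue.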
@@ -5311,6 +5723,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
/* allowed interface combinations */
+ wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
wl->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(wlcore_iface_combinations);
@@ -5327,7 +5740,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
#define WL1271_DEFAULT_CHANNEL 0
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
+ u32 mbox_size)
{
struct ieee80211_hw *hw;
struct wl1271 *wl;
@@ -5369,9 +5783,8 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
+ INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
- INIT_DELAYED_WORK(&wl->connection_loss_work,
- wl1271_connection_loss_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
@@ -5387,14 +5800,15 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
wl->flags = 0;
wl->sg_enabled = true;
wl->sleep_auth = WL1271_PSM_ILLEGAL;
+ wl->recovery_count = 0;
wl->hw_pg_ver = -1;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
wl->quirks = 0;
wl->platform_quirks = 0;
- wl->sched_scanning = false;
wl->system_hlid = WL12XX_SYSTEM_HLID;
wl->active_sta_count = 0;
+ wl->active_link_count = 0;
wl->fwlog_size = 0;
init_waitqueue_head(&wl->fwlog_waitq);
@@ -5434,14 +5848,24 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
goto err_dummy_packet;
}
- wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
+ wl->mbox_size = mbox_size;
+ wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
if (!wl->mbox) {
ret = -ENOMEM;
goto err_fwlog;
}
+ wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
+ if (!wl->buffer_32) {
+ ret = -ENOMEM;
+ goto err_mbox;
+ }
+
return hw;
+err_mbox:
+ kfree(wl->mbox);
+
err_fwlog:
free_page((unsigned long)wl->fwlog);
@@ -5480,6 +5904,8 @@ int wlcore_free_hw(struct wl1271 *wl)
device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+ kfree(wl->buffer_32);
+ kfree(wl->mbox);
free_page((unsigned long)wl->fwlog);
dev_kfree_skb(wl->dummy_packet);
free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
@@ -5536,7 +5962,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
struct wl1271 *wl = context;
struct platform_device *pdev = wl->pdev;
- struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
+ struct wl12xx_platform_data *pdata = pdev_data->pdata;
unsigned long irqflags;
int ret;
@@ -5565,8 +5992,7 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
wl->irq = platform_get_irq(pdev, 0);
wl->platform_quirks = pdata->platform_quirks;
- wl->set_power = pdata->set_power;
- wl->if_ops = pdata->ops;
+ wl->if_ops = pdev_data->if_ops;
if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
irqflags = IRQF_TRIGGER_RISING;
@@ -5712,10 +6138,10 @@ module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
"FW logger options: continuous, ondemand, dbgpins or disable");
-module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
+module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
-module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
+module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 4d1414a673f..9b7b6e2e4fb 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -151,9 +151,6 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
wl12xx_queue_recovery_work(wl);
ret = -ETIMEDOUT;
goto err;
- } else if (ret < 0) {
- wl1271_error("ELP wakeup completion error.");
- goto err;
}
}
@@ -242,11 +239,12 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
struct ieee80211_tx_info *info;
unsigned long flags;
int filtered[NUM_TX_QUEUES];
+ struct wl1271_link *lnk = &wl->links[hlid];
/* filter all frames currently in the low level queues for this hlid */
for (i = 0; i < NUM_TX_QUEUES; i++) {
filtered[i] = 0;
- while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
filtered[i]++;
if (WARN_ON(wl12xx_is_dummy_packet(wl, skb)))
@@ -260,8 +258,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
}
spin_lock_irqsave(&wl->wl_lock, flags);
- for (i = 0; i < NUM_TX_QUEUES; i++)
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->tx_queue_count[i] -= filtered[i];
+ if (lnk->wlvif)
+ lnk->wlvif->tx_queue_count[i] -= filtered[i];
+ }
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_handle_tx_low_watermark(wl);
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 9ee0ec6fd1d..6791a1a6afb 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -92,11 +92,16 @@ static void wl1271_rx_status(struct wl1271 *wl,
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- if (unlikely(desc_err_code == WL1271_RX_DESC_MIC_FAIL)) {
+ if (unlikely(desc_err_code & WL1271_RX_DESC_MIC_FAIL)) {
status->flag |= RX_FLAG_MMIC_ERROR;
- wl1271_warning("Michael MIC error");
+ wl1271_warning("Michael MIC error. Desc: 0x%x",
+ desc_err_code);
}
}
+
+ if (beacon)
+ wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
+ status->band);
}
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
@@ -108,7 +113,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
u8 *buf;
u8 beacon = 0;
u8 is_data = 0;
- u8 reserved = 0;
+ u8 reserved = 0, offset_to_data = 0;
u16 seq_num;
u32 pkt_data_len;
@@ -128,6 +133,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
if (rx_align == WLCORE_RX_BUF_UNALIGNED)
reserved = RX_BUF_ALIGN;
+ else if (rx_align == WLCORE_RX_BUF_PADDED)
+ offset_to_data = RX_BUF_ALIGN;
/* the data read starts with the descriptor */
desc = (struct wl1271_rx_descriptor *) data;
@@ -139,19 +146,15 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
return 0;
}
- switch (desc->status & WL1271_RX_DESC_STATUS_MASK) {
/* discard corrupted packets */
- case WL1271_RX_DESC_DRIVER_RX_Q_FAIL:
- case WL1271_RX_DESC_DECRYPT_FAIL:
- wl1271_warning("corrupted packet in RX with status: 0x%x",
- desc->status & WL1271_RX_DESC_STATUS_MASK);
- return -EINVAL;
- case WL1271_RX_DESC_SUCCESS:
- case WL1271_RX_DESC_MIC_FAIL:
- break;
- default:
- wl1271_error("invalid RX descriptor status: 0x%x",
- desc->status & WL1271_RX_DESC_STATUS_MASK);
+ if (desc->status & WL1271_RX_DESC_DECRYPT_FAIL) {
+ hdr = (void *)(data + sizeof(*desc) + offset_to_data);
+ wl1271_warning("corrupted packet in RX: status: 0x%x len: %d",
+ desc->status & WL1271_RX_DESC_STATUS_MASK,
+ pkt_data_len);
+ wl1271_dump((DEBUG_RX|DEBUG_CMD), "PKT: ", data + sizeof(*desc),
+ min(pkt_data_len,
+ ieee80211_hdrlen(hdr->frame_control)));
return -EINVAL;
}
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 71eba189991..3363f60fb7d 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -84,12 +84,11 @@
* Bits 3-5 - process_id tag (AP mode FW)
* Bits 6-7 - reserved
*/
-#define WL1271_RX_DESC_STATUS_MASK 0x03
+#define WL1271_RX_DESC_STATUS_MASK 0x07
#define WL1271_RX_DESC_SUCCESS 0x00
#define WL1271_RX_DESC_DECRYPT_FAIL 0x01
#define WL1271_RX_DESC_MIC_FAIL 0x02
-#define WL1271_RX_DESC_DRIVER_RX_Q_FAIL 0x03
#define RX_MEM_BLOCK_MASK 0xFF
#define RX_BUF_SIZE_MASK 0xFFF00
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index d00501493df..f407101e525 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -35,7 +35,6 @@ void wl1271_scan_complete_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
- struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
int ret;
@@ -52,8 +51,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
- vif = wl->scan_vif;
- wlvif = wl12xx_vif_to_data(vif);
+ wlvif = wl->scan_wlvif;
/*
* Rearm the tx watchdog just before idling scan. This
@@ -64,7 +62,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
- wl->scan_vif = NULL;
+ wl->scan_wlvif = NULL;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
@@ -82,6 +80,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl12xx_queue_recovery_work(wl);
}
+ wlcore_cmd_regdomain_config_locked(wl);
+
ieee80211_scan_completed(wl->hw, false);
out:
@@ -89,371 +89,99 @@ out:
}
-
-static int wl1271_get_scan_channels(struct wl1271 *wl,
- struct cfg80211_scan_request *req,
- struct basic_scan_channel_params *channels,
- enum ieee80211_band band, bool passive)
-{
- struct conf_scan_settings *c = &wl->conf.scan;
- int i, j;
- u32 flags;
-
- for (i = 0, j = 0;
- i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
- i++) {
- flags = req->channels[i]->flags;
-
- if (!test_bit(i, wl->scan.scanned_ch) &&
- !(flags & IEEE80211_CHAN_DISABLED) &&
- (req->channels[i]->band == band) &&
- /*
- * In passive scans, we scan all remaining
- * channels, even if not marked as such.
- * In active scans, we only scan channels not
- * marked as passive.
- */
- (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
- wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
- req->channels[i]->band,
- req->channels[i]->center_freq);
- wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
- req->channels[i]->hw_value,
- req->channels[i]->flags);
- wl1271_debug(DEBUG_SCAN,
- "max_antenna_gain %d, max_power %d",
- req->channels[i]->max_antenna_gain,
- req->channels[i]->max_power);
- wl1271_debug(DEBUG_SCAN, "beacon_found %d",
- req->channels[i]->beacon_found);
-
- if (!passive) {
- channels[j].min_duration =
- cpu_to_le32(c->min_dwell_time_active);
- channels[j].max_duration =
- cpu_to_le32(c->max_dwell_time_active);
- } else {
- channels[j].min_duration =
- cpu_to_le32(c->min_dwell_time_passive);
- channels[j].max_duration =
- cpu_to_le32(c->max_dwell_time_passive);
- }
- channels[j].early_termination = 0;
- channels[j].tx_power_att = req->channels[i]->max_power;
- channels[j].channel = req->channels[i]->hw_value;
-
- memset(&channels[j].bssid_lsb, 0xff, 4);
- memset(&channels[j].bssid_msb, 0xff, 2);
-
- /* Mark the channels we already used */
- set_bit(i, wl->scan.scanned_ch);
-
- j++;
- }
- }
-
- return j;
-}
-
-#define WL1271_NOTHING_TO_SCAN 1
-
-static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
- enum ieee80211_band band,
- bool passive, u32 basic_rate)
+static void wlcore_started_vifs_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- struct wl1271_cmd_scan *cmd;
- struct wl1271_cmd_trigger_scan_to *trigger;
- int ret;
- u16 scan_options = 0;
-
- /* skip active scans if we don't have SSIDs */
- if (!passive && wl->scan.req->n_ssids == 0)
- return WL1271_NOTHING_TO_SCAN;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
- if (!cmd || !trigger) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (wl->conf.scan.split_scan_timeout)
- scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN;
-
- if (passive)
- scan_options |= WL1271_SCAN_OPT_PASSIVE;
-
- cmd->params.role_id = wlvif->role_id;
-
- if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
- ret = -EINVAL;
- goto out;
- }
-
- cmd->params.scan_options = cpu_to_le16(scan_options);
-
- cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
- cmd->channels,
- band, passive);
- if (cmd->params.n_ch == 0) {
- ret = WL1271_NOTHING_TO_SCAN;
- goto out;
- }
-
- cmd->params.tx_rate = cpu_to_le32(basic_rate);
- cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
- cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
- cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
-
- if (band == IEEE80211_BAND_2GHZ)
- cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
- else
- cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
-
- if (wl->scan.ssid_len && wl->scan.ssid) {
- cmd->params.ssid_len = wl->scan.ssid_len;
- memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
- }
-
- memcpy(cmd->addr, vif->addr, ETH_ALEN);
-
- ret = wl12xx_cmd_build_probe_req(wl, wlvif,
- cmd->params.role_id, band,
- wl->scan.ssid, wl->scan.ssid_len,
- wl->scan.req->ie,
- wl->scan.req->ie_len, false);
- if (ret < 0) {
- wl1271_error("PROBE request template failed");
- goto out;
- }
-
- trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout);
- ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
- sizeof(*trigger), 0);
- if (ret < 0) {
- wl1271_error("trigger scan to failed for hw scan");
- goto out;
- }
-
- wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
+ int *count = (int *)data;
- ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
- if (ret < 0) {
- wl1271_error("SCAN failed");
- goto out;
- }
-
-out:
- kfree(cmd);
- kfree(trigger);
- return ret;
+ if (!vif->bss_conf.idle)
+ (*count)++;
}
-void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
+static int wlcore_count_started_vifs(struct wl1271 *wl)
{
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- int ret = 0;
- enum ieee80211_band band;
- u32 rate, mask;
-
- switch (wl->scan.state) {
- case WL1271_SCAN_STATE_IDLE:
- break;
-
- case WL1271_SCAN_STATE_2GHZ_ACTIVE:
- band = IEEE80211_BAND_2GHZ;
- mask = wlvif->bitrate_masks[band];
- if (wl->scan.req->no_cck) {
- mask &= ~CONF_TX_CCK_RATES;
- if (!mask)
- mask = CONF_TX_RATE_MASK_BASIC_P2P;
- }
- rate = wl1271_tx_min_rate_get(wl, mask);
- ret = wl1271_scan_send(wl, vif, band, false, rate);
- if (ret == WL1271_NOTHING_TO_SCAN) {
- wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
- wl1271_scan_stm(wl, vif);
- }
-
- break;
-
- case WL1271_SCAN_STATE_2GHZ_PASSIVE:
- band = IEEE80211_BAND_2GHZ;
- mask = wlvif->bitrate_masks[band];
- if (wl->scan.req->no_cck) {
- mask &= ~CONF_TX_CCK_RATES;
- if (!mask)
- mask = CONF_TX_RATE_MASK_BASIC_P2P;
- }
- rate = wl1271_tx_min_rate_get(wl, mask);
- ret = wl1271_scan_send(wl, vif, band, true, rate);
- if (ret == WL1271_NOTHING_TO_SCAN) {
- if (wl->enable_11a)
- wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
- else
- wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl, vif);
- }
-
- break;
+ int count = 0;
- case WL1271_SCAN_STATE_5GHZ_ACTIVE:
- band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, vif, band, false, rate);
- if (ret == WL1271_NOTHING_TO_SCAN) {
- wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
- wl1271_scan_stm(wl, vif);
- }
-
- break;
-
- case WL1271_SCAN_STATE_5GHZ_PASSIVE:
- band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, vif, band, true, rate);
- if (ret == WL1271_NOTHING_TO_SCAN) {
- wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl, vif);
- }
-
- break;
-
- case WL1271_SCAN_STATE_DONE:
- wl->scan.failed = false;
- cancel_delayed_work(&wl->scan_complete_work);
- ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
- msecs_to_jiffies(0));
- break;
-
- default:
- wl1271_error("invalid scan state");
- break;
- }
-
- if (ret < 0) {
- cancel_delayed_work(&wl->scan_complete_work);
- ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
- msecs_to_jiffies(0));
- }
-}
-
-int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
- const u8 *ssid, size_t ssid_len,
- struct cfg80211_scan_request *req)
-{
- /*
- * cfg80211 should guarantee that we don't get more channels
- * than what we have registered.
- */
- BUG_ON(req->n_channels > WL1271_MAX_CHANNELS);
-
- if (wl->scan.state != WL1271_SCAN_STATE_IDLE)
- return -EBUSY;
-
- wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE;
-
- if (ssid_len && ssid) {
- wl->scan.ssid_len = ssid_len;
- memcpy(wl->scan.ssid, ssid, ssid_len);
- } else {
- wl->scan.ssid_len = 0;
- }
-
- wl->scan_vif = vif;
- wl->scan.req = req;
- memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
-
- /* we assume failure so that timeout scenarios are handled correctly */
- wl->scan.failed = true;
- ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
- msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
-
- wl1271_scan_stm(wl, vif);
-
- return 0;
-}
-
-int wl1271_scan_stop(struct wl1271 *wl)
-{
- struct wl1271_cmd_header *cmd = NULL;
- int ret = 0;
-
- if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE))
- return -EINVAL;
-
- wl1271_debug(DEBUG_CMD, "cmd scan stop");
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd,
- sizeof(*cmd), 0);
- if (ret < 0) {
- wl1271_error("cmd stop_scan failed");
- goto out;
- }
-out:
- kfree(cmd);
- return ret;
+ ieee80211_iterate_active_interfaces_atomic(wl->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ wlcore_started_vifs_iter, &count);
+ return count;
}
static int
-wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
- struct cfg80211_sched_scan_request *req,
- struct conn_scan_ch_params *channels,
- u32 band, bool radar, bool passive,
- int start, int max_channels,
- u8 *n_pactive_ch)
+wlcore_scan_get_channels(struct wl1271 *wl,
+ struct ieee80211_channel *req_channels[],
+ u32 n_channels,
+ u32 n_ssids,
+ struct conn_scan_ch_params *channels,
+ u32 band, bool radar, bool passive,
+ int start, int max_channels,
+ u8 *n_pactive_ch,
+ int scan_type)
{
- struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, j;
u32 flags;
- bool force_passive = !req->n_ssids;
- u32 min_dwell_time_active, max_dwell_time_active, delta_per_probe;
+ bool force_passive = !n_ssids;
+ u32 min_dwell_time_active, max_dwell_time_active;
u32 dwell_time_passive, dwell_time_dfs;
- if (band == IEEE80211_BAND_5GHZ)
- delta_per_probe = c->dwell_time_delta_per_probe_5;
- else
- delta_per_probe = c->dwell_time_delta_per_probe;
+ /* configure dwell times according to scan type */
+ if (scan_type == SCAN_TYPE_SEARCH) {
+ struct conf_scan_settings *c = &wl->conf.scan;
+ bool active_vif_exists = !!wlcore_count_started_vifs(wl);
+
+ min_dwell_time_active = active_vif_exists ?
+ c->min_dwell_time_active :
+ c->min_dwell_time_active_long;
+ max_dwell_time_active = active_vif_exists ?
+ c->max_dwell_time_active :
+ c->max_dwell_time_active_long;
+ dwell_time_passive = c->dwell_time_passive;
+ dwell_time_dfs = c->dwell_time_dfs;
+ } else {
+ struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
+ u32 delta_per_probe;
- min_dwell_time_active = c->base_dwell_time +
- req->n_ssids * c->num_probe_reqs * delta_per_probe;
+ if (band == IEEE80211_BAND_5GHZ)
+ delta_per_probe = c->dwell_time_delta_per_probe_5;
+ else
+ delta_per_probe = c->dwell_time_delta_per_probe;
- max_dwell_time_active = min_dwell_time_active + c->max_dwell_time_delta;
+ min_dwell_time_active = c->base_dwell_time +
+ n_ssids * c->num_probe_reqs * delta_per_probe;
+ max_dwell_time_active = min_dwell_time_active +
+ c->max_dwell_time_delta;
+ dwell_time_passive = c->dwell_time_passive;
+ dwell_time_dfs = c->dwell_time_dfs;
+ }
min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000);
max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000);
- dwell_time_passive = DIV_ROUND_UP(c->dwell_time_passive, 1000);
- dwell_time_dfs = DIV_ROUND_UP(c->dwell_time_dfs, 1000);
+ dwell_time_passive = DIV_ROUND_UP(dwell_time_passive, 1000);
+ dwell_time_dfs = DIV_ROUND_UP(dwell_time_dfs, 1000);
for (i = 0, j = start;
- i < req->n_channels && j < max_channels;
+ i < n_channels && j < max_channels;
i++) {
- flags = req->channels[i]->flags;
+ flags = req_channels[i]->flags;
if (force_passive)
flags |= IEEE80211_CHAN_PASSIVE_SCAN;
- if ((req->channels[i]->band == band) &&
+ if ((req_channels[i]->band == band) &&
!(flags & IEEE80211_CHAN_DISABLED) &&
(!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
/* if radar is set, we ignore the passive flag */
(radar ||
!!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
- req->channels[i]->band,
- req->channels[i]->center_freq);
+ req_channels[i]->band,
+ req_channels[i]->center_freq);
wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
- req->channels[i]->hw_value,
- req->channels[i]->flags);
+ req_channels[i]->hw_value,
+ req_channels[i]->flags);
wl1271_debug(DEBUG_SCAN, "max_power %d",
- req->channels[i]->max_power);
+ req_channels[i]->max_power);
wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
min_dwell_time_active,
max_dwell_time_active);
@@ -473,10 +201,11 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
channels[j].max_duration =
cpu_to_le16(max_dwell_time_active);
- channels[j].tx_power_att = req->channels[i]->max_power;
- channels[j].channel = req->channels[i]->hw_value;
+ channels[j].tx_power_att = req_channels[i]->max_power;
+ channels[j].channel = req_channels[i]->hw_value;
- if ((band == IEEE80211_BAND_2GHZ) &&
+ if (n_pactive_ch &&
+ (band == IEEE80211_BAND_2GHZ) &&
(channels[j].channel >= 12) &&
(channels[j].channel <= 14) &&
(flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
@@ -500,51 +229,80 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
return j - start;
}
-static bool
-wl1271_scan_sched_scan_channels(struct wl1271 *wl,
- struct cfg80211_sched_scan_request *req,
- struct wl1271_cmd_sched_scan_config *cfg)
+bool
+wlcore_set_scan_chan_params(struct wl1271 *wl,
+ struct wlcore_scan_channels *cfg,
+ struct ieee80211_channel *channels[],
+ u32 n_channels,
+ u32 n_ssids,
+ int scan_type)
{
u8 n_pactive_ch = 0;
cfg->passive[0] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
- IEEE80211_BAND_2GHZ,
- false, true, 0,
- MAX_CHANNELS_2GHZ,
- &n_pactive_ch);
+ wlcore_scan_get_channels(wl,
+ channels,
+ n_channels,
+ n_ssids,
+ cfg->channels_2,
+ IEEE80211_BAND_2GHZ,
+ false, true, 0,
+ MAX_CHANNELS_2GHZ,
+ &n_pactive_ch,
+ scan_type);
cfg->active[0] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
- IEEE80211_BAND_2GHZ,
- false, false,
- cfg->passive[0],
- MAX_CHANNELS_2GHZ,
- &n_pactive_ch);
+ wlcore_scan_get_channels(wl,
+ channels,
+ n_channels,
+ n_ssids,
+ cfg->channels_2,
+ IEEE80211_BAND_2GHZ,
+ false, false,
+ cfg->passive[0],
+ MAX_CHANNELS_2GHZ,
+ &n_pactive_ch,
+ scan_type);
cfg->passive[1] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
- IEEE80211_BAND_5GHZ,
- false, true, 0,
- MAX_CHANNELS_5GHZ,
- &n_pactive_ch);
+ wlcore_scan_get_channels(wl,
+ channels,
+ n_channels,
+ n_ssids,
+ cfg->channels_5,
+ IEEE80211_BAND_5GHZ,
+ false, true, 0,
+ wl->max_channels_5,
+ &n_pactive_ch,
+ scan_type);
cfg->dfs =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
- IEEE80211_BAND_5GHZ,
- true, true,
- cfg->passive[1],
- MAX_CHANNELS_5GHZ,
- &n_pactive_ch);
+ wlcore_scan_get_channels(wl,
+ channels,
+ n_channels,
+ n_ssids,
+ cfg->channels_5,
+ IEEE80211_BAND_5GHZ,
+ true, true,
+ cfg->passive[1],
+ wl->max_channels_5,
+ &n_pactive_ch,
+ scan_type);
cfg->active[1] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
- IEEE80211_BAND_5GHZ,
- false, false,
- cfg->passive[1] + cfg->dfs,
- MAX_CHANNELS_5GHZ,
- &n_pactive_ch);
+ wlcore_scan_get_channels(wl,
+ channels,
+ n_channels,
+ n_ssids,
+ cfg->channels_5,
+ IEEE80211_BAND_5GHZ,
+ false, false,
+ cfg->passive[1] + cfg->dfs,
+ wl->max_channels_5,
+ &n_pactive_ch,
+ scan_type);
+
/* 802.11j channels are not supported yet */
cfg->passive[2] = 0;
cfg->active[2] = 0;
- cfg->n_pactive_ch = n_pactive_ch;
+ cfg->passive_active = n_pactive_ch;
wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
cfg->active[0], cfg->passive[0]);
@@ -556,10 +314,48 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
cfg->passive[1] || cfg->active[1] || cfg->dfs ||
cfg->passive[2] || cfg->active[2];
}
+EXPORT_SYMBOL_GPL(wlcore_set_scan_chan_params);
+
+int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
+ struct cfg80211_scan_request *req)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
+ /*
+ * cfg80211 should guarantee that we don't get more channels
+ * than what we have registered.
+ */
+ BUG_ON(req->n_channels > WL1271_MAX_CHANNELS);
+
+ if (wl->scan.state != WL1271_SCAN_STATE_IDLE)
+ return -EBUSY;
+
+ wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE;
+
+ if (ssid_len && ssid) {
+ wl->scan.ssid_len = ssid_len;
+ memcpy(wl->scan.ssid, ssid, ssid_len);
+ } else {
+ wl->scan.ssid_len = 0;
+ }
+
+ wl->scan_wlvif = wlvif;
+ wl->scan.req = req;
+ memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+
+ /* we assume failure so that timeout scenarios are handled correctly */
+ wl->scan.failed = true;
+ ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
+ msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
+ wl->ops->scan_start(wl, wlvif, req);
+
+ return 0;
+}
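The new wlcore_scan() pre-marks the scan as failed and arms scan_complete_work with WL1271_SCAN_TIMEOUT, so a completion event that never arrives is still torn down as an aborted scan. A minimal sketch of that timeout pattern, with simplified logic rather than the actual wlcore work handler:

/*
 * Illustrative sketch only: assumes the normal completion path resets
 * wl->scan.state and clears wl->scan.failed before this delayed work
 * fires; the real handler also deals with recovery.
 */
static void scan_timeout_sketch(struct work_struct *work)
{
	struct wl1271 *wl = container_of(to_delayed_work(work),
					 struct wl1271, scan_complete_work);

	mutex_lock(&wl->mutex);

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		/* no completion arrived in time: report the abort */
		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		ieee80211_scan_completed(wl->hw, wl->scan.failed);
	}

	mutex_unlock(&wl->mutex);
}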
/* Returns the scan type to be used or a negative value on error */
-static int
-wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
+int
+wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req)
{
@@ -662,160 +458,12 @@ out:
return ret;
return type;
}
+EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_ssid_list);
-int wl1271_scan_sched_scan_config(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_sched_scan_ies *ies)
-{
- struct wl1271_cmd_sched_scan_config *cfg = NULL;
- struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
- int i, ret;
- bool force_passive = !req->n_ssids;
-
- wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
-
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
-
- cfg->role_id = wlvif->role_id;
- cfg->rssi_threshold = c->rssi_threshold;
- cfg->snr_threshold = c->snr_threshold;
- cfg->n_probe_reqs = c->num_probe_reqs;
- /* cycles set to 0 it means infinite (until manually stopped) */
- cfg->cycles = 0;
- /* report APs when at least 1 is found */
- cfg->report_after = 1;
- /* don't stop scanning automatically when something is found */
- cfg->terminate = 0;
- cfg->tag = WL1271_SCAN_DEFAULT_TAG;
- /* don't filter on BSS type */
- cfg->bss_type = SCAN_BSS_TYPE_ANY;
- /* currently NL80211 supports only a single interval */
- for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
- cfg->intervals[i] = cpu_to_le32(req->interval);
-
- cfg->ssid_len = 0;
- ret = wl12xx_scan_sched_scan_ssid_list(wl, wlvif, req);
- if (ret < 0)
- goto out;
-
- cfg->filter_type = ret;
-
- wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type);
-
- if (!wl1271_scan_sched_scan_channels(wl, req, cfg)) {
- wl1271_error("scan channel list is empty");
- ret = -EINVAL;
- goto out;
- }
-
- if (!force_passive && cfg->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
- ret = wl12xx_cmd_build_probe_req(wl, wlvif,
- wlvif->role_id, band,
- req->ssids[0].ssid,
- req->ssids[0].ssid_len,
- ies->ie[band],
- ies->len[band], true);
- if (ret < 0) {
- wl1271_error("2.4GHz PROBE request template failed");
- goto out;
- }
- }
-
- if (!force_passive && cfg->active[1]) {
- u8 band = IEEE80211_BAND_5GHZ;
- ret = wl12xx_cmd_build_probe_req(wl, wlvif,
- wlvif->role_id, band,
- req->ssids[0].ssid,
- req->ssids[0].ssid_len,
- ies->ie[band],
- ies->len[band], true);
- if (ret < 0) {
- wl1271_error("5GHz PROBE request template failed");
- goto out;
- }
- }
-
- wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg));
-
- ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg,
- sizeof(*cfg), 0);
- if (ret < 0) {
- wl1271_error("SCAN configuration failed");
- goto out;
- }
-out:
- kfree(cfg);
- return ret;
-}
-
-int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
-{
- struct wl1271_cmd_sched_scan_start *start;
- int ret = 0;
-
- wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
-
- if (wlvif->bss_type != BSS_TYPE_STA_BSS)
- return -EOPNOTSUPP;
-
- if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
- test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
- return -EBUSY;
-
- start = kzalloc(sizeof(*start), GFP_KERNEL);
- if (!start)
- return -ENOMEM;
-
- start->role_id = wlvif->role_id;
- start->tag = WL1271_SCAN_DEFAULT_TAG;
-
- ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
- sizeof(*start), 0);
- if (ret < 0) {
- wl1271_error("failed to send scan start command");
- goto out_free;
- }
-
-out_free:
- kfree(start);
- return ret;
-}
-
-void wl1271_scan_sched_scan_results(struct wl1271 *wl)
+void wlcore_scan_sched_scan_results(struct wl1271 *wl)
{
wl1271_debug(DEBUG_SCAN, "got periodic scan results");
ieee80211_sched_scan_results(wl->hw);
}
-
-void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
-{
- struct wl1271_cmd_sched_scan_stop *stop;
- int ret = 0;
-
- wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
-
- /* FIXME: what to do if alloc'ing to stop fails? */
- stop = kzalloc(sizeof(*stop), GFP_KERNEL);
- if (!stop) {
- wl1271_error("failed to alloc memory to send sched scan stop");
- return;
- }
-
- stop->role_id = wlvif->role_id;
- stop->tag = WL1271_SCAN_DEFAULT_TAG;
-
- ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
- sizeof(*stop), 0);
- if (ret < 0) {
- wl1271_error("failed to send sched scan stop command");
- goto out_free;
- }
-
-out_free:
- kfree(stop);
-}
+EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_results);
diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 29f3c8d6b04..a6ab24b5c0f 100644
--- a/drivers/net/wireless/ti/wlcore/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -26,22 +26,20 @@
#include "wlcore.h"
-int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req);
-int wl1271_scan_stop(struct wl1271 *wl);
int wl1271_scan_build_probe_req(struct wl1271 *wl,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
-void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
+void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl1271_scan_complete_work(struct work_struct *work);
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies);
int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-void wl1271_scan_sched_scan_results(struct wl1271 *wl);
+void wlcore_scan_sched_scan_results(struct wl1271 *wl);
#define WL1271_SCAN_MAX_CHANNELS 24
#define WL1271_SCAN_DEFAULT_TAG 1
@@ -66,56 +64,6 @@ enum {
WL1271_SCAN_STATE_DONE
};
-struct basic_scan_params {
- /* Scan option flags (WL1271_SCAN_OPT_*) */
- __le16 scan_options;
- u8 role_id;
- /* Number of scan channels in the list (maximum 30) */
- u8 n_ch;
- /* This field indicates the number of probe requests to send
- per channel for an active scan */
- u8 n_probe_reqs;
- u8 tid_trigger;
- u8 ssid_len;
- u8 use_ssid_list;
-
- /* Rate bit field for sending the probes */
- __le32 tx_rate;
-
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- /* Band to scan */
- u8 band;
-
- u8 scan_tag;
- u8 padding2[2];
-} __packed;
-
-struct basic_scan_channel_params {
- /* Duration in TU to wait for frames on a channel for active scan */
- __le32 min_duration;
- __le32 max_duration;
- __le32 bssid_lsb;
- __le16 bssid_msb;
- u8 early_termination;
- u8 tx_power_att;
- u8 channel;
- /* FW internal use only! */
- u8 dfs_candidate;
- u8 activity_detected;
- u8 pad;
-} __packed;
-
-struct wl1271_cmd_scan {
- struct wl1271_cmd_header header;
-
- struct basic_scan_params params;
- struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
-
- /* src mac address */
- u8 addr[ETH_ALEN];
- u8 padding[2];
-} __packed;
-
struct wl1271_cmd_trigger_scan_to {
struct wl1271_cmd_header header;
@@ -123,9 +71,17 @@ struct wl1271_cmd_trigger_scan_to {
} __packed;
#define MAX_CHANNELS_2GHZ 14
-#define MAX_CHANNELS_5GHZ 23
#define MAX_CHANNELS_4GHZ 4
+/*
+ * This max value is used only for the struct definition of
+ * wlcore_scan_channels. The struct is shared by the 12xx and
+ * 18xx families, which have different 5GHz channel limits, so
+ * size it for the largest possible number of 5GHz channels.
+ */
+#define MAX_CHANNELS_5GHZ 42
+
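Because wlcore_scan_channels is shared, each chip family reports its real 5GHz limit at runtime through wl->max_channels_5 (used above when filling channels_5), while the struct itself is sized with this worst-case constant. A hedged sketch of how a lower driver could keep the two in sync; the WL18XX_MAX_CHANNELS_5GHZ constant and the function name are assumptions for illustration only:

static int wl18xx_set_scan_limits_sketch(struct wl1271 *wl)
{
	/* the per-chip limit must fit in the shared struct */
	BUILD_BUG_ON(WL18XX_MAX_CHANNELS_5GHZ > MAX_CHANNELS_5GHZ);

	wl->max_channels_5 = WL18XX_MAX_CHANNELS_5GHZ;
	return 0;
}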
#define SCAN_MAX_CYCLE_INTERVALS 16
#define SCAN_MAX_BANDS 3
@@ -160,43 +116,6 @@ struct conn_scan_ch_params {
u8 padding[3];
} __packed;
-struct wl1271_cmd_sched_scan_config {
- struct wl1271_cmd_header header;
-
- __le32 intervals[SCAN_MAX_CYCLE_INTERVALS];
-
- s8 rssi_threshold; /* for filtering (in dBm) */
- s8 snr_threshold; /* for filtering (in dB) */
-
- u8 cycles; /* maximum number of scan cycles */
- u8 report_after; /* report when this number of results are received */
- u8 terminate; /* stop scanning after reporting */
-
- u8 tag;
- u8 bss_type; /* for filtering */
- u8 filter_type;
-
- u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */
- u8 ssid[IEEE80211_MAX_SSID_LEN];
-
- u8 n_probe_reqs; /* Number of probes requests per channel */
-
- u8 passive[SCAN_MAX_BANDS];
- u8 active[SCAN_MAX_BANDS];
-
- u8 dfs;
-
- u8 n_pactive_ch; /* number of pactive (passive until fw detects energy)
- channels in BG band */
- u8 role_id;
- u8 padding[1];
-
- struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
- struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
- struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
-} __packed;
-
-
#define SCHED_SCAN_MAX_SSIDS 16
enum {
@@ -220,21 +139,34 @@ struct wl1271_cmd_sched_scan_ssid_list {
u8 padding[2];
} __packed;
-struct wl1271_cmd_sched_scan_start {
- struct wl1271_cmd_header header;
+struct wlcore_scan_channels {
+ u8 passive[SCAN_MAX_BANDS]; /* number of passive scan channels */
+ u8 active[SCAN_MAX_BANDS]; /* number of active scan channels */
+ u8 dfs; /* number of DFS channels in 5GHz */
+ u8 passive_active; /* number of passive-before-active channels in 2.4GHz */
- u8 tag;
- u8 role_id;
- u8 padding[2];
-} __packed;
-
-struct wl1271_cmd_sched_scan_stop {
- struct wl1271_cmd_header header;
+ struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
+ struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
+ struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
+};
- u8 tag;
- u8 role_id;
- u8 padding[2];
-} __packed;
+enum {
+ SCAN_TYPE_SEARCH = 0,
+ SCAN_TYPE_PERIODIC = 1,
+ SCAN_TYPE_TRACKING = 2,
+};
+bool
+wlcore_set_scan_chan_params(struct wl1271 *wl,
+ struct wlcore_scan_channels *cfg,
+ struct ieee80211_channel *channels[],
+ u32 n_channels,
+ u32 n_ssids,
+ int scan_type);
+
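The scan_type argument is what selects between the one-shot dwell times in conf.scan and the sched-scan dwell times in conf.sched_scan (see the dwell-time branch in wlcore_scan_get_channels above). A usage sketch for a one-shot request, not part of this patch:

static bool fill_oneshot_channels_sketch(struct wl1271 *wl,
					 struct wlcore_scan_channels *cfg,
					 struct cfg80211_scan_request *req)
{
	/* one-shot scans use SCAN_TYPE_SEARCH; a periodic (sched)
	 * scan would pass SCAN_TYPE_PERIODIC instead */
	return wlcore_set_scan_chan_params(wl, cfg, req->channels,
					   req->n_channels, req->n_ssids,
					   SCAN_TYPE_SEARCH);
}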
+int
+wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req);
#endif /* __WL1271_SCAN_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index a3a20be2036..29ef2492951 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = {
static int wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct wl12xx_platform_data *wlan_data;
+ struct wlcore_platdev_data *pdev_data;
struct wl12xx_sdio_glue *glue;
struct resource res[1];
mmc_pm_flag_t mmcflags;
@@ -228,10 +228,18 @@ static int wl1271_probe(struct sdio_func *func,
if (func->num != 0x02)
return -ENODEV;
- glue = kzalloc(sizeof(*glue), GFP_KERNEL);
- if (!glue)
+ pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
+ if (!pdev_data)
goto out;
+ pdev_data->if_ops = &sdio_ops;
+
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&func->dev, "can't allocate glue\n");
+ goto out_free_pdev_data;
+ }
+
glue->dev = &func->dev;
/* Grab access to FN0 for ELP reg. */
@@ -240,9 +248,9 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- wlan_data = wl12xx_get_platform_data();
- if (IS_ERR(wlan_data)) {
- ret = PTR_ERR(wlan_data);
+ pdev_data->pdata = wl12xx_get_platform_data();
+ if (IS_ERR(pdev_data->pdata)) {
+ ret = PTR_ERR(pdev_data->pdata);
dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
goto out_free_glue;
}
@@ -252,9 +260,7 @@ static int wl1271_probe(struct sdio_func *func,
dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
if (mmcflags & MMC_PM_KEEP_POWER)
- wlan_data->pwr_in_suspend = true;
-
- wlan_data->ops = &sdio_ops;
+ pdev_data->pdata->pwr_in_suspend = true;
sdio_set_drvdata(func, glue);
@@ -272,7 +278,7 @@ static int wl1271_probe(struct sdio_func *func,
else
chip_family = "wl12xx";
- glue->core = platform_device_alloc(chip_family, -1);
+ glue->core = platform_device_alloc(chip_family, PLATFORM_DEVID_AUTO);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device");
ret = -ENOMEM;
@@ -283,7 +289,7 @@ static int wl1271_probe(struct sdio_func *func,
memset(res, 0x00, sizeof(res));
- res[0].start = wlan_data->irq;
+ res[0].start = pdev_data->pdata->irq;
res[0].flags = IORESOURCE_IRQ;
res[0].name = "irq";
@@ -293,8 +299,8 @@ static int wl1271_probe(struct sdio_func *func,
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, wlan_data,
- sizeof(*wlan_data));
+ ret = platform_device_add_data(glue->core, pdev_data,
+ sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -313,6 +319,9 @@ out_dev_put:
out_free_glue:
kfree(glue);
+out_free_pdev_data:
+ kfree(pdev_data);
+
out:
return ret;
}
@@ -324,8 +333,7 @@ static void wl1271_remove(struct sdio_func *func)
/* Undo decrement done above in wl1271_probe */
pm_runtime_get_noresume(&func->dev);
- platform_device_del(glue->core);
- platform_device_put(glue->core);
+ platform_device_unregister(glue->core);
kfree(glue);
}
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 18cadc07b75..e2644783268 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -87,8 +87,11 @@ static void wl12xx_spi_reset(struct device *child)
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
- if (!cmd)
+ if (!cmd) {
+ dev_err(child->parent,
+ "could not allocate cmd for spi reset\n");
return;
+ }
memset(&t, 0, sizeof(t));
spi_message_init(&m);
@@ -112,8 +115,11 @@ static void wl12xx_spi_init(struct device *child)
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
- if (!cmd)
+ if (!cmd) {
+ dev_err(child->parent,
+ "could not allocate cmd for spi init\n");
return;
+ }
memset(crc, 0, sizeof(crc));
memset(&t, 0, sizeof(t));
@@ -264,7 +270,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
- struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
+ struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
struct spi_message m;
u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
u32 *cmd;
@@ -321,21 +327,28 @@ static struct wl1271_if_operations spi_ops = {
static int wl1271_probe(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue;
- struct wl12xx_platform_data *pdata;
+ struct wlcore_platdev_data *pdev_data;
struct resource res[1];
int ret = -ENOMEM;
- pdata = spi->dev.platform_data;
- if (!pdata) {
+ pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
+ if (!pdev_data)
+ goto out;
+
+ pdev_data->pdata = spi->dev.platform_data;
+ if (!pdev_data->pdata) {
dev_err(&spi->dev, "no platform data\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free_pdev_data;
}
- pdata->ops = &spi_ops;
+ pdev_data->if_ops = &spi_ops;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
- if (!glue)
- goto out;
+ if (!glue) {
+ dev_err(&spi->dev, "can't allocate glue\n");
+ goto out_free_pdev_data;
+ }
glue->dev = &spi->dev;
@@ -351,7 +364,7 @@ static int wl1271_probe(struct spi_device *spi)
goto out_free_glue;
}
- glue->core = platform_device_alloc("wl12xx", -1);
+ glue->core = platform_device_alloc("wl12xx", PLATFORM_DEVID_AUTO);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device\n");
ret = -ENOMEM;
@@ -372,7 +385,8 @@ static int wl1271_probe(struct spi_device *spi)
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
+ ret = platform_device_add_data(glue->core, pdev_data,
+ sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -391,6 +405,10 @@ out_dev_put:
out_free_glue:
kfree(glue);
+
+out_free_pdev_data:
+ kfree(pdev_data);
+
out:
return ret;
}
@@ -399,8 +417,7 @@ static int wl1271_remove(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
- platform_device_del(glue->core);
- platform_device_put(glue->core);
+ platform_device_unregister(glue->core);
kfree(glue);
return 0;
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index a90d3cd0940..ece392c54d9 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -104,7 +104,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 hlid)
{
- bool fw_ps, single_sta;
+ bool fw_ps, single_link;
u8 tx_pkts;
if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
@@ -112,15 +112,15 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
tx_pkts = wl->links[hlid].allocated_pkts;
- single_sta = (wl->active_sta_count == 1);
+ single_link = (wl->active_link_count == 1);
/*
* if in FW PS and there is enough data in FW we can put the link
* into high-level PS and clean out its TX queues.
- * Make an exception if this is the only connected station. In this
- * case FW-memory congestion is not a problem.
+ * Make an exception if this is the only connected link. In this
+ * case FW-memory congestion is less of a problem.
*/
- if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
+ if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
@@ -155,21 +155,18 @@ static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, struct ieee80211_sta *sta)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-
- if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
- return wl->system_hlid;
+ struct ieee80211_tx_info *control;
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
- if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
- test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
- !ieee80211_is_auth(hdr->frame_control) &&
- !ieee80211_is_assoc_req(hdr->frame_control))
- return wlvif->sta.hlid;
- else
+ control = IEEE80211_SKB_CB(skb);
+ if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ wl1271_debug(DEBUG_TX, "tx offchannel");
return wlvif->dev_hlid;
+ }
+
+ return wlvif->sta.hlid;
}
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
@@ -224,9 +221,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
- if (!wl12xx_is_dummy_packet(wl, skb) && wlvif &&
- wlvif->bss_type == BSS_TYPE_AP_BSS &&
- test_bit(hlid, wlvif->ap.sta_hlid_map))
+ if (test_bit(hlid, wl->links_map))
wl->links[hlid].allocated_pkts++;
ret = 0;
@@ -293,9 +288,14 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
} else if (wlvif) {
+ u8 session_id = wl->session_ids[hlid];
+
+ if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
+ (wlvif->bss_type == BSS_TYPE_AP_BSS))
+ session_id = 0;
+
/* configure the tx attributes */
- tx_attr = wlvif->session_counter <<
- TX_HW_ATTR_OFST_SESSION_COUNTER;
+ tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
}
desc->hlid = hlid;
@@ -452,20 +452,22 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
int i;
+ struct wl12xx_vif *wlvif;
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- if (wlcore_is_queue_stopped_by_reason(wl, i,
- WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
- wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
- /* firmware buffer has space, restart queues */
- wlcore_wake_queue(wl, i,
- WLCORE_QUEUE_STOP_REASON_WATERMARK);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
+ wlvif->tx_queue_count[i] <=
+ WL1271_TX_QUEUE_LOW_WATERMARK)
+ /* firmware buffer has space, restart queues */
+ wlcore_wake_queue(wl, wlvif, i,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
}
}
-static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
- struct sk_buff_head *queues)
+static int wlcore_select_ac(struct wl1271 *wl)
{
int i, q = -1, ac;
u32 min_pkts = 0xffffffff;
@@ -479,45 +481,60 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
*/
for (i = 0; i < NUM_TX_QUEUES; i++) {
ac = wl1271_tx_get_queue(i);
- if (!skb_queue_empty(&queues[ac]) &&
- (wl->tx_allocated_pkts[ac] < min_pkts)) {
+ if (wl->tx_queue_count[ac] &&
+ wl->tx_allocated_pkts[ac] < min_pkts) {
q = ac;
min_pkts = wl->tx_allocated_pkts[q];
}
}
- if (q == -1)
- return NULL;
-
- return &queues[q];
+ return q;
}
-static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
- struct wl1271_link *lnk)
+static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
+ struct wl1271_link *lnk, u8 q)
{
struct sk_buff *skb;
unsigned long flags;
- struct sk_buff_head *queue;
- queue = wl1271_select_queue(wl, lnk->tx_queue);
- if (!queue)
- return NULL;
-
- skb = skb_dequeue(queue);
+ skb = skb_dequeue(&lnk->tx_queue[q]);
if (skb) {
- int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
wl->tx_queue_count[q]--;
+ if (lnk->wlvif) {
+ WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
+ lnk->wlvif->tx_queue_count[q]--;
+ }
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
return skb;
}
-static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- u8 *hlid)
+static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
+ u8 hlid, u8 ac,
+ u8 *low_prio_hlid)
+{
+ struct wl1271_link *lnk = &wl->links[hlid];
+
+ if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
+ if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
+ !skb_queue_empty(&lnk->tx_queue[ac]) &&
+ wlcore_hw_lnk_low_prio(wl, hlid, lnk))
+ /* we found the first non-empty low priority queue */
+ *low_prio_hlid = hlid;
+
+ return NULL;
+ }
+
+ return wlcore_lnk_dequeue(wl, lnk, ac);
+}
+
+static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 ac, u8 *hlid,
+ u8 *low_prio_hlid)
{
struct sk_buff *skb = NULL;
int i, h, start_hlid;
@@ -533,7 +550,8 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
if (!test_bit(h, wlvif->links_map))
continue;
- skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+ skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
+ low_prio_hlid);
if (!skb)
continue;
@@ -553,42 +571,74 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
unsigned long flags;
struct wl12xx_vif *wlvif = wl->last_wlvif;
struct sk_buff *skb = NULL;
+ int ac;
+ u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;
+
+ ac = wlcore_select_ac(wl);
+ if (ac < 0)
+ goto out;
/* continue from last wlvif (round robin) */
if (wlvif) {
wl12xx_for_each_wlvif_continue(wl, wlvif) {
- skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
- if (skb) {
- wl->last_wlvif = wlvif;
- break;
- }
+ if (!wlvif->tx_queue_count[ac])
+ continue;
+
+ skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
+ &low_prio_hlid);
+ if (!skb)
+ continue;
+
+ wl->last_wlvif = wlvif;
+ break;
}
}
/* dequeue from the system HLID before the restarting wlvif list */
if (!skb) {
- skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
- *hlid = wl->system_hlid;
+ skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
+ ac, &low_prio_hlid);
+ if (skb) {
+ *hlid = wl->system_hlid;
+ wl->last_wlvif = NULL;
+ }
}
- /* do a new pass over the wlvif list */
+ /* Do a new pass over the wlvif list. But no need to continue
+ * after last_wlvif. The previous pass should have found it. */
if (!skb) {
wl12xx_for_each_wlvif(wl, wlvif) {
- skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
+ if (!wlvif->tx_queue_count[ac])
+ goto next;
+
+ skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
+ &low_prio_hlid);
if (skb) {
wl->last_wlvif = wlvif;
break;
}
- /*
- * No need to continue after last_wlvif. The previous
- * pass should have found it.
- */
+next:
if (wlvif == wl->last_wlvif)
break;
}
}
+ /* no high priority skbs found - but maybe a low priority one? */
+ if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
+ struct wl1271_link *lnk = &wl->links[low_prio_hlid];
+ skb = wlcore_lnk_dequeue(wl, lnk, ac);
+
+ WARN_ON(!skb); /* we checked this before */
+ *hlid = low_prio_hlid;
+
+ /* ensure proper round robin in the vif/link levels */
+ wl->last_wlvif = lnk->wlvif;
+ if (lnk->wlvif)
+ lnk->wlvif->last_tx_hlid = low_prio_hlid;
+
+ }
+
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
int q;
@@ -602,6 +652,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
+out:
return skb;
}
@@ -623,6 +674,8 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count[q]++;
+ if (wlvif)
+ wlvif->tx_queue_count[q]++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@@ -699,7 +752,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
bool has_data = false;
wlvif = NULL;
- if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+ if (!wl12xx_is_dummy_packet(wl, skb))
wlvif = wl12xx_vif_to_data(info->control.vif);
else
hlid = wl->system_hlid;
@@ -972,10 +1025,11 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
unsigned long flags;
struct ieee80211_tx_info *info;
int total[NUM_TX_QUEUES];
+ struct wl1271_link *lnk = &wl->links[hlid];
for (i = 0; i < NUM_TX_QUEUES; i++) {
total[i] = 0;
- while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
if (!wl12xx_is_dummy_packet(wl, skb)) {
@@ -990,8 +1044,11 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
}
spin_lock_irqsave(&wl->wl_lock, flags);
- for (i = 0; i < NUM_TX_QUEUES; i++)
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->tx_queue_count[i] -= total[i];
+ if (lnk->wlvif)
+ lnk->wlvif->tx_queue_count[i] -= total[i];
+ }
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_handle_tx_low_watermark(wl);
@@ -1004,16 +1061,18 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* TX failure */
for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
- if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
+ /* this calls wl12xx_free_link */
wl1271_free_sta(wl, wlvif, i);
- else
- wlvif->sta.ba_rx_bitmap = 0;
-
- wl->links[i].allocated_pkts = 0;
- wl->links[i].prev_freed_pkts = 0;
+ } else {
+ u8 hlid = i;
+ wl12xx_free_link(wl, wlvif, &hlid);
+ }
}
wlvif->last_tx_hlid = 0;
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wlvif->tx_queue_count[i] = 0;
}
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
@@ -1023,7 +1082,7 @@ void wl12xx_tx_reset(struct wl1271 *wl)
struct ieee80211_tx_info *info;
/* only reset the queues if something bad happened */
- if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
+ if (wl1271_tx_total_queue_count(wl) != 0) {
for (i = 0; i < WL12XX_MAX_LINKS; i++)
wl1271_tx_reset_link_queues(wl, i);
@@ -1135,45 +1194,48 @@ u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
return BIT(__ffs(rate_set));
}
+EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);
-void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
- enum wlcore_queue_stop_reason reason)
+void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue, enum wlcore_queue_stop_reason reason)
{
- bool stopped = !!wl->queue_stop_reasons[queue];
+ int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+ bool stopped = !!wl->queue_stop_reasons[hwq];
/* queue should not be stopped for this reason */
- WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));
+ WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));
if (stopped)
return;
- ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+ ieee80211_stop_queue(wl->hw, hwq);
}
-void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason)
{
unsigned long flags;
spin_lock_irqsave(&wl->wl_lock, flags);
- wlcore_stop_queue_locked(wl, queue, reason);
+ wlcore_stop_queue_locked(wl, wlvif, queue, reason);
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
-void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason)
{
unsigned long flags;
+ int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
spin_lock_irqsave(&wl->wl_lock, flags);
/* queue should not be clear for this reason */
- WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));
+ WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));
- if (wl->queue_stop_reasons[queue])
+ if (wl->queue_stop_reasons[hwq])
goto out;
- ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+ ieee80211_wake_queue(wl->hw, hwq);
out:
spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -1183,48 +1245,74 @@ void wlcore_stop_queues(struct wl1271 *wl,
enum wlcore_queue_stop_reason reason)
{
int i;
+ unsigned long flags;
- for (i = 0; i < NUM_TX_QUEUES; i++)
- wlcore_stop_queue(wl, i, reason);
+ spin_lock_irqsave(&wl->wl_lock, flags);
+
+ /* mark all possible queues as stopped */
+ for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
+ WARN_ON_ONCE(test_and_set_bit(reason,
+ &wl->queue_stop_reasons[i]));
+
+ /* use the global version to make sure that even vifs mac80211
+ * knows about but we don't are stopped.
+ */
+ ieee80211_stop_queues(wl->hw);
+
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
}
-EXPORT_SYMBOL_GPL(wlcore_stop_queues);
void wlcore_wake_queues(struct wl1271 *wl,
enum wlcore_queue_stop_reason reason)
{
int i;
+ unsigned long flags;
- for (i = 0; i < NUM_TX_QUEUES; i++)
- wlcore_wake_queue(wl, i, reason);
+ spin_lock_irqsave(&wl->wl_lock, flags);
+
+ /* mark all possible queues as awake */
+ for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
+ WARN_ON_ONCE(!test_and_clear_bit(reason,
+ &wl->queue_stop_reasons[i]));
+
+ /* use the global version to make sure that even vifs mac80211
+ * knows about but we don't are woken up.
+ */
+ ieee80211_wake_queues(wl->hw);
+
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
}
-EXPORT_SYMBOL_GPL(wlcore_wake_queues);
-void wlcore_reset_stopped_queues(struct wl1271 *wl)
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 queue,
+ enum wlcore_queue_stop_reason reason)
{
- int i;
unsigned long flags;
+ bool stopped;
spin_lock_irqsave(&wl->wl_lock, flags);
-
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- if (!wl->queue_stop_reasons[i])
- continue;
-
- wl->queue_stop_reasons[i] = 0;
- ieee80211_wake_queue(wl->hw,
- wl1271_tx_get_mac80211_queue(i));
- }
-
+ stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
+ reason);
spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return stopped;
}
-bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
- enum wlcore_queue_stop_reason reason)
+bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 queue,
+ enum wlcore_queue_stop_reason reason)
{
- return test_bit(reason, &wl->queue_stop_reasons[queue]);
+ int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+
+ WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock));
+ return test_bit(reason, &wl->queue_stop_reasons[hwq]);
}
-bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
+bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue)
{
- return !!wl->queue_stop_reasons[queue];
+ int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+
+ WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock));
+ return !!wl->queue_stop_reasons[hwq];
}
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 349520d8b72..55aa4acf910 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -207,19 +207,22 @@ static inline int wl1271_tx_get_queue(int queue)
}
}
-static inline int wl1271_tx_get_mac80211_queue(int queue)
+static inline
+int wlcore_tx_get_mac80211_queue(struct wl12xx_vif *wlvif, int queue)
{
+ int mac_queue = wlvif->hw_queue_base;
+
switch (queue) {
case CONF_TX_AC_VO:
- return 0;
+ return mac_queue + 0;
case CONF_TX_AC_VI:
- return 1;
+ return mac_queue + 1;
case CONF_TX_AC_BE:
- return 2;
+ return mac_queue + 2;
case CONF_TX_AC_BK:
- return 3;
+ return mac_queue + 3;
default:
- return 2;
+ return mac_queue + 2;
}
}
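With the per-vif hw_queue_base offset, every stop/wake call now targets one vif's mac80211 queues instead of the global ones. A minimal usage sketch with assumed values (a second vif typically sits at hw_queue_base 4, so its BE traffic maps to hw queue 6):

static void throttle_vif_be_sketch(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif)
{
	/* stops only this vif's BE queue; other vifs keep running */
	wlcore_stop_queue(wl, wlvif, CONF_TX_AC_BE,
			  WLCORE_QUEUE_STOP_REASON_WATERMARK);
}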
@@ -252,20 +255,26 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length);
void wl1271_free_tx_id(struct wl1271 *wl, int id);
-void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
- enum wlcore_queue_stop_reason reason);
-void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue, enum wlcore_queue_stop_reason reason);
+void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason);
-void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason);
void wlcore_stop_queues(struct wl1271 *wl,
enum wlcore_queue_stop_reason reason);
void wlcore_wake_queues(struct wl1271 *wl,
enum wlcore_queue_stop_reason reason);
-void wlcore_reset_stopped_queues(struct wl1271 *wl);
-bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason);
-bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue);
+bool
+wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 queue,
+ enum wlcore_queue_stop_reason reason);
+bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue);
/* from main.c */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index c3884937c00..af9fecaefc3 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -37,6 +37,9 @@
*/
#define WLCORE_NUM_MAC_ADDRESSES 3
+/* wl12xx/wl18xx maximum transmission power (in dBm) */
+#define WLCORE_MAX_TXPWR 25
+
/* forward declaration */
struct wl1271_tx_hw_descr;
enum wl_rx_buf_align;
@@ -51,6 +54,9 @@ struct wlcore_ops {
int (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
void *buf, size_t len);
int (*ack_event)(struct wl1271 *wl);
+ int (*wait_for_event)(struct wl1271 *wl, enum wlcore_wait_event event,
+ bool *timeout);
+ int (*process_mailbox_events)(struct wl1271 *wl);
u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks);
void (*set_tx_desc_blocks)(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
@@ -82,12 +88,32 @@ struct wlcore_ops {
int (*debugfs_init)(struct wl1271 *wl, struct dentry *rootdir);
int (*handle_static_data)(struct wl1271 *wl,
struct wl1271_static_data *static_data);
+ int (*scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_scan_request *req);
+ int (*scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+ int (*sched_scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+ void (*sched_scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int (*get_spare_blocks)(struct wl1271 *wl, bool is_gem);
int (*set_key)(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf);
+ int (*channel_switch)(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_channel_switch *ch_switch);
u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
+ void (*sta_rc_update)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u32 changed);
+ int (*set_peer_cap)(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation,
+ u32 rate_set, u8 hlid);
+ bool (*lnk_high_prio)(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk);
+ bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid,
+ struct wl1271_link *lnk);
};
enum wlcore_partitions {
@@ -157,7 +183,6 @@ struct wl1271 {
struct wl1271_if_operations *if_ops;
- void (*set_power)(bool enable);
int irq;
spinlock_t wl_lock;
@@ -202,6 +227,8 @@ struct wl1271 {
unsigned long klv_templates_map[
BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
+ u8 session_ids[WL12XX_MAX_LINKS];
+
struct list_head wlvif_list;
u8 sta_count;
@@ -227,7 +254,8 @@ struct wl1271 {
/* Frames scheduled for transmission, not handled yet */
int tx_queue_count[NUM_TX_QUEUES];
- unsigned long queue_stop_reasons[NUM_TX_QUEUES];
+ unsigned long queue_stop_reasons[
+ NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES];
/* Frames received, not handled yet by mac80211 */
struct sk_buff_head deferred_rx_queue;
@@ -269,24 +297,30 @@ struct wl1271 {
struct work_struct recovery_work;
bool watchdog_recovery;
+ /* Reg domain last configuration */
+ u32 reg_ch_conf_last[2];
+ /* Reg domain pending configuration */
+ u32 reg_ch_conf_pending[2];
+
/* Pointer that holds DMA-friendly block for the mailbox */
- struct event_mailbox *mbox;
+ void *mbox;
/* The mbox event mask */
u32 event_mask;
/* Mailbox pointers */
+ u32 mbox_size;
u32 mbox_ptr[2];
/* Are we currently scanning */
- struct ieee80211_vif *scan_vif;
+ struct wl12xx_vif *scan_wlvif;
struct wl1271_scan scan;
struct delayed_work scan_complete_work;
- /* Connection loss work */
- struct delayed_work connection_loss_work;
+ struct ieee80211_vif *roc_vif;
+ struct delayed_work roc_complete_work;
- bool sched_scanning;
+ struct wl12xx_vif *sched_vif;
/* The current band */
enum ieee80211_band band;
@@ -299,7 +333,7 @@ struct wl1271 {
struct wl1271_stats stats;
- __le32 buffer_32;
+ __le32 *buffer_32;
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
@@ -314,6 +348,8 @@ struct wl1271 {
bool enable_11a;
+ int recovery_count;
+
/* Most recently reported noise in dBm */
s8 noise;
@@ -333,6 +369,12 @@ struct wl1271 {
*/
struct wl1271_link links[WL12XX_MAX_LINKS];
+ /* number of currently active links */
+ int active_link_count;
+
+ /* Fast/slow links bitmap according to FW */
+ u32 fw_fast_lnk_map;
+
/* AP-mode - a bitmap of links currently in PS mode according to FW */
u32 ap_fw_ps_map;
@@ -367,6 +409,12 @@ struct wl1271 {
const char *sr_fw_name;
const char *mr_fw_name;
+ u8 scan_templ_id_2_4;
+ u8 scan_templ_id_5;
+ u8 sched_scan_templ_id_2_4;
+ u8 sched_scan_templ_id_5;
+ u8 max_channels_5;
+
/* per-chip-family private structure */
void *priv;
@@ -408,20 +456,28 @@ struct wl1271 {
/* the number of allocated MAC addresses in this chip */
int num_mac_addr;
- /* the minimum FW version required for the driver to work */
- unsigned int min_fw_ver[NUM_FW_VER];
+ /* minimum FW version required for the driver to work in single-role */
+ unsigned int min_sr_fw_ver[NUM_FW_VER];
+
+ /* minimum FW version required for the driver to work in multi-role */
+ unsigned int min_mr_fw_ver[NUM_FW_VER];
struct completion nvs_loading_complete;
+
+ /* number of concurrent channels the HW supports */
+ u32 num_channels;
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
int wlcore_remove(struct platform_device *pdev);
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size);
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
+ u32 mbox_size);
int wlcore_free_hw(struct wl1271 *wl);
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf);
+void wlcore_regdomain_config(struct wl1271 *wl);
static inline void
wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
@@ -430,16 +486,27 @@ wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
}
+/* Tell wlcore not to care about this element when checking the version */
+#define WLCORE_FW_VER_IGNORE -1
+
static inline void
wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
- unsigned int iftype, unsigned int major,
- unsigned int subtype, unsigned int minor)
+ unsigned int iftype_sr, unsigned int major_sr,
+ unsigned int subtype_sr, unsigned int minor_sr,
+ unsigned int iftype_mr, unsigned int major_mr,
+ unsigned int subtype_mr, unsigned int minor_mr)
{
- wl->min_fw_ver[FW_VER_CHIP] = chip;
- wl->min_fw_ver[FW_VER_IF_TYPE] = iftype;
- wl->min_fw_ver[FW_VER_MAJOR] = major;
- wl->min_fw_ver[FW_VER_SUBTYPE] = subtype;
- wl->min_fw_ver[FW_VER_MINOR] = minor;
+ wl->min_sr_fw_ver[FW_VER_CHIP] = chip;
+ wl->min_sr_fw_ver[FW_VER_IF_TYPE] = iftype_sr;
+ wl->min_sr_fw_ver[FW_VER_MAJOR] = major_sr;
+ wl->min_sr_fw_ver[FW_VER_SUBTYPE] = subtype_sr;
+ wl->min_sr_fw_ver[FW_VER_MINOR] = minor_sr;
+
+ wl->min_mr_fw_ver[FW_VER_CHIP] = chip;
+ wl->min_mr_fw_ver[FW_VER_IF_TYPE] = iftype_mr;
+ wl->min_mr_fw_ver[FW_VER_MAJOR] = major_mr;
+ wl->min_mr_fw_ver[FW_VER_SUBTYPE] = subtype_mr;
+ wl->min_mr_fw_ver[FW_VER_MINOR] = minor_mr;
}
/* Firmware image load chunk size */
@@ -450,6 +517,9 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0)
+/* the first start_role(sta) sometimes doesn't work on wl12xx */
+#define WLCORE_QUIRK_START_STA_FAILS BIT(1)
+
/* wl127x and SPI don't support SDIO block size alignment */
#define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2)
@@ -462,9 +532,6 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
/* Older firmwares use an old NVS format */
#define WLCORE_QUIRK_LEGACY_NVS BIT(5)
-/* Some firmwares may not support ELP */
-#define WLCORE_QUIRK_NO_ELP BIT(6)
-
/* pad only the last frame in the aggregate buffer */
#define WLCORE_QUIRK_TX_PAD_LAST_FRAME BIT(7)
@@ -477,11 +544,11 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
/* separate probe response templates for one-shot and sched scans */
#define WLCORE_QUIRK_DUAL_PROBE_TMPL BIT(10)
-/* TODO: move to the lower drivers when all usages are abstracted */
-#define CHIP_ID_1271_PG10 (0x4030101)
-#define CHIP_ID_1271_PG20 (0x4030111)
-#define CHIP_ID_1283_PG10 (0x05030101)
-#define CHIP_ID_1283_PG20 (0x05030111)
+/* Firmware requires reg domain configuration for active calibration */
+#define WLCORE_QUIRK_REGDOMAIN_CONF BIT(11)
+
+/* The FW only supports a zero session id for AP */
+#define WLCORE_QUIRK_AP_ZERO_SESSION_ID BIT(12)
/* TODO: move all these common registers and values elsewhere */
#define HW_ACCESS_ELP_CTRL_REG 0x1FFFC
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 5ce26cf402f..508f5b0f8a7 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -109,17 +109,6 @@ enum {
NUM_FW_VER
};
-#define FW_VER_CHIP_WL127X 6
-#define FW_VER_CHIP_WL128X 7
-
-#define FW_VER_IF_TYPE_STA 1
-#define FW_VER_IF_TYPE_AP 2
-
-#define FW_VER_MINOR_1_SPARE_STA_MIN 58
-#define FW_VER_MINOR_1_SPARE_AP_MIN 47
-
-#define FW_VER_MINOR_FWLOG_STA_MIN 70
-
struct wl1271_chip {
u32 id;
char fw_ver_str[ETHTOOL_FWVERS_LEN];
@@ -141,7 +130,10 @@ struct wl_fw_packet_counters {
/* Cumulative counter of released Voice memory blocks */
u8 tx_voice_released_blks;
- u8 padding[3];
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ u8 padding[2];
} __packed;
/* FW status registers */
@@ -214,6 +206,11 @@ struct wl1271_if_operations {
void (*set_block_size) (struct device *child, unsigned int blksz);
};
+struct wlcore_platdev_data {
+ struct wl12xx_platform_data *pdata;
+ struct wl1271_if_operations *if_ops;
+};
+
#define MAX_NUM_KEYS 14
#define MAX_KEY_SIZE 32
@@ -260,6 +257,8 @@ enum wl12xx_vif_flags {
WLVIF_FLAG_IN_USE,
};
+struct wl12xx_vif;
+
struct wl1271_link {
/* AP-mode - TX queue per AC in link */
struct sk_buff_head tx_queue[NUM_TX_QUEUES];
@@ -272,6 +271,9 @@ struct wl1271_link {
/* bitmap of TIDs where RX BA sessions are active for this link */
u8 ba_bitmap;
+
+ /* The wlvif this link belongs to. Might be null for global links */
+ struct wl12xx_vif *wlvif;
};
#define WL1271_MAX_RX_FILTERS 5
@@ -315,6 +317,7 @@ struct wl12xx_rx_filter {
struct wl1271_station {
u8 hlid;
+ bool in_connection;
};
struct wl12xx_vif {
@@ -332,7 +335,6 @@ struct wl12xx_vif {
union {
struct {
u8 hlid;
- u8 ba_rx_bitmap;
u8 basic_rate_idx;
u8 ap_rate_idx;
@@ -341,6 +343,8 @@ struct wl12xx_vif {
u8 klv_template_id;
bool qos;
+ /* channel type we started the STA role with */
+ enum nl80211_channel_type role_chan_type;
} sta;
struct {
u8 global_hlid;
@@ -362,6 +366,9 @@ struct wl12xx_vif {
/* the hlid of the last transmitted skb */
int last_tx_hlid;
+ /* counters of packets per AC, across all links in the vif */
+ int tx_queue_count[NUM_TX_QUEUES];
+
unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
@@ -396,9 +403,6 @@ struct wl12xx_vif {
/* Our association ID */
u16 aid;
- /* Session counter for the chipset */
- int session_counter;
-
/* retry counter for PSM entries */
u8 psm_entry_retry;
@@ -416,11 +420,28 @@ struct wl12xx_vif {
bool ba_support;
bool ba_allowed;
+ bool wmm_enabled;
+
/* Rx Streaming */
struct work_struct rx_streaming_enable_work;
struct work_struct rx_streaming_disable_work;
struct timer_list rx_streaming_timer;
+ struct delayed_work channel_switch_work;
+ struct delayed_work connection_loss_work;
+
+ /* number of in-connection stations */
+ int inconn_count;
+
+ /*
+ * This vif's queues are mapped to mac80211 HW queues as:
+ * VO - hw_queue_base
+ * VI - hw_queue_base + 1
+ * BE - hw_queue_base + 2
+ * BK - hw_queue_base + 3
+ */
+ int hw_queue_base;
+
/*
* This struct must be last!
* data that has to be saved across reconfigs (e.g. recovery)
@@ -443,6 +464,7 @@ struct wl12xx_vif {
static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
{
+ WARN_ON(!vif);
return (struct wl12xx_vif *)vif->drv_priv;
}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 94b79c3338c..9d7f1723dd8 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
/* Notify xenvif that ring now has space to send an skb to the frontend */
void xenvif_notify_tx_completion(struct xenvif *vif);
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
/* Returns number of ring slots required to send an skb to the frontend */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f733cae3d4b..d9841416848 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif)
static void xenvif_down(struct xenvif *vif)
{
disable_irq(vif->irq);
+ del_timer_sync(&vif->credit_timeout);
xen_netbk_deschedule_xenvif(vif);
xen_netbk_remove_xenvif(vif);
}
@@ -345,23 +346,26 @@ err:
return err;
}
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
{
struct net_device *dev = vif->dev;
- if (netif_carrier_ok(dev)) {
- rtnl_lock();
- netif_carrier_off(dev); /* discard queued packets */
- if (netif_running(dev))
- xenvif_down(vif);
- rtnl_unlock();
- xenvif_put(vif);
- }
+
+ rtnl_lock();
+ netif_carrier_off(dev); /* discard queued packets */
+ if (netif_running(dev))
+ xenvif_down(vif);
+ rtnl_unlock();
+ xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+ if (netif_carrier_ok(vif->dev))
+ xenvif_carrier_off(vif);
atomic_dec(&vif->refcnt);
wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
- del_timer_sync(&vif->credit_timeout);
-
if (vif->irq)
unbind_from_irqhandler(vif->irq, vif);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f2d6b78d901..cd49ba94963 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
atomic_dec(&netbk->netfront_count);
}
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ u8 status);
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
do {
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- if (cons >= end)
+ if (cons == end)
break;
txp = RING_GET_REQUEST(&vif->tx, cons++);
} while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
xenvif_put(vif);
}
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+ netdev_err(vif->dev, "fatal error; disabling device\n");
+ xenvif_carrier_off(vif);
+ xenvif_put(vif);
+}
+
static int netbk_count_requests(struct xenvif *vif,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
@@ -901,29 +909,33 @@ static int netbk_count_requests(struct xenvif *vif,
do {
if (frags >= work_to_do) {
- netdev_dbg(vif->dev, "Need more frags\n");
- return -frags;
+ netdev_err(vif->dev, "Need more frags\n");
+ netbk_fatal_tx_err(vif);
+ return -ENODATA;
}
if (unlikely(frags >= MAX_SKB_FRAGS)) {
- netdev_dbg(vif->dev, "Too many frags\n");
- return -frags;
+ netdev_err(vif->dev, "Too many frags\n");
+ netbk_fatal_tx_err(vif);
+ return -E2BIG;
}
memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
sizeof(*txp));
if (txp->size > first->size) {
- netdev_dbg(vif->dev, "Frags galore\n");
- return -frags;
+ netdev_err(vif->dev, "Frag is bigger than frame.\n");
+ netbk_fatal_tx_err(vif);
+ return -EIO;
}
first->size -= txp->size;
frags++;
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
txp->offset, txp->size);
- return -frags;
+ netbk_fatal_tx_err(vif);
+ return -EINVAL;
}
} while ((txp++)->flags & XEN_NETTXF_more_data);
return frags;
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
pending_idx = netbk->pending_ring[index];
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
if (!page)
- return NULL;
+ goto err;
gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
}
return gop;
+err:
+ /* Unwind, freeing all pages and sending error responses. */
+ while (i-- > start) {
+ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+ XEN_NETIF_RSP_ERROR);
+ }
+ /* The head too, if necessary. */
+ if (start)
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ return NULL;
}
static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
{
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
- struct xenvif *vif = pending_tx_info[pending_idx].vif;
- struct xen_netif_tx_request *txp;
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i, err, start;
/* Check status of header. */
err = gop->status;
- if (unlikely(err)) {
- pending_ring_idx_t index;
- index = pending_index(netbk->pending_prod++);
- txp = &pending_tx_info[pending_idx].req;
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- netbk->pending_ring[index] = pending_idx;
- xenvif_put(vif);
- }
+ if (unlikely(err))
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < nr_frags; i++) {
int j, newerr;
- pending_ring_idx_t index;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
continue;
}
/* Error on this fragment: respond to client with an error. */
- txp = &netbk->pending_tx_info[pending_idx].req;
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- index = pending_index(netbk->pending_prod++);
- netbk->pending_ring[index] = pending_idx;
- xenvif_put(vif);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
/* First error: invalidate header and preceding fragments. */
pending_idx = *((u16 *)skb->data);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
/* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
/* Take an extra reference to offset xen_netbk_idx_release */
get_page(netbk->mmap_pages[pending_idx]);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
}
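[Editor's note] The get_page() in the hunk above takes an extra reference so the page survives the xen_netbk_idx_release() call that immediately follows (which now also reports the slot as OKAY). A tiny sketch of that "take a reference to offset a put" pattern, with a hypothetical demo_page refcount rather than the kernel's struct page:

    /* Hypothetical sketch: take an extra reference before calling a
     * helper that drops one, so the object outlives the bookkeeping. */
    struct demo_page {
            int refcount;
    };

    static void demo_get(struct demo_page *p) { p->refcount++; }
    static void demo_put(struct demo_page *p) { p->refcount--; }

    static void demo_release_slot(struct demo_page *p)
    {
            /* ... complete the slot, send a response ... */
            demo_put(p);            /* release drops one reference */
    }

    static void demo_fill_frag(struct demo_page *p)
    {
            demo_get(p);            /* extra ref offsets the put in release */
            demo_release_slot(p);   /* page keeps its original reference */
    }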
@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
do {
if (unlikely(work_to_do-- <= 0)) {
- netdev_dbg(vif->dev, "Missing extra info\n");
+ netdev_err(vif->dev, "Missing extra info\n");
+ netbk_fatal_tx_err(vif);
return -EBADR;
}
@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
vif->tx.req_cons = ++cons;
- netdev_dbg(vif->dev,
+ netdev_err(vif->dev,
"Invalid extra type: %d\n", extra.type);
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}
@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+ netdev_err(vif->dev, "GSO size must not be zero.\n");
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}
/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
/* Get a netif from the list with work to do. */
vif = poll_net_schedule_list(netbk);
+ /* This can sometimes happen because the test of
+ * list_empty(net_schedule_list) at the top of the
+ * loop is unlocked. Just go back and have another
+ * look.
+ */
if (!vif)
continue;
+ if (vif->tx.sring->req_prod - vif->tx.req_cons >
+ XEN_NETIF_TX_RING_SIZE) {
+ netdev_err(vif->dev,
+ "Impossible number of requests. "
+ "req_prod %d, req_cons %d, size %ld\n",
+ vif->tx.sring->req_prod, vif->tx.req_cons,
+ XEN_NETIF_TX_RING_SIZE);
+ netbk_fatal_tx_err(vif);
+ continue;
+ }
+
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
if (!work_to_do) {
xenvif_put(vif);
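[Editor's note] The sanity check added in the hunk above relies on the ring indices being free-running counters: a well-behaved frontend can never have more than a full ring of outstanding requests, so req_prod - req_cons, computed with unsigned wrap-around arithmetic, must not exceed the ring size. A standalone sketch of the invariant with hypothetical names (ring_request_count_sane is not part of the driver):

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical check mirroring the req_prod/req_cons test above:
     * both indices are free-running 32-bit counters, so the difference
     * is taken in unsigned arithmetic and compared to the ring size. */
    static bool ring_request_count_sane(uint32_t req_prod, uint32_t req_cons,
                                        uint32_t ring_size)
    {
            return (uint32_t)(req_prod - req_cons) <= ring_size;
    }

If the check fails the ring contents cannot be trusted, which is why the patch escalates straight to netbk_fatal_tx_err() instead of trying to consume the requests.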
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
work_to_do = xen_netbk_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
- if (unlikely(work_to_do < 0)) {
- netbk_tx_err(vif, &txreq, idx);
+ if (unlikely(work_to_do < 0))
continue;
- }
}
ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
- if (unlikely(ret < 0)) {
- netbk_tx_err(vif, &txreq, idx - ret);
+ if (unlikely(ret < 0))
continue;
- }
+
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
- netdev_dbg(vif->dev,
+ netdev_err(vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
- netbk_tx_err(vif, &txreq, idx);
+ netbk_fatal_tx_err(vif);
continue;
}
@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
if (netbk_set_skb_gso(vif, skb, gso)) {
+ /* Failure in netbk_set_skb_gso is fatal. */
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
continue;
}
}
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
xen_netbk_tx_submit(netbk);
}
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ u8 status)
{
struct xenvif *vif;
struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
vif = pending_tx_info->vif;
- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+ make_tx_response(vif, &pending_tx_info->req, status);
index = pending_index(netbk->pending_prod++);
netbk->pending_ring[index] = pending_idx;