Diffstat (limited to 'drivers/net/wireless/iwlwifi')
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 43
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile | 38
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 58
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-2000.c | 90
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 104
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000-hw.h | 81
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 93
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-calib.c | 133
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c | 299
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-hw.h | 32
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 1120
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 117
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-rx.c) | 302
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rxon.c | 104
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-sta.c | 943
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-tt.c | 82
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-tt.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 992
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-ucode.c | 119
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 1499
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.h | 216
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-bus.h | 90
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-cfg.h (renamed from drivers/net/wireless/iwlwifi/iwl-5000-hw.h) | 73
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h | 62
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 405
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 179
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.h | 48
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c | 450
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h | 556
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c | 306
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h | 20
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-helpers.h | 184
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.c | 192
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.h | 61
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.c | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-pci.c | 49
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.c | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 764
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-shared.h | 534
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 832
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.h | 138
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sv-open.c | 30
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h | 82
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h | 436
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c | 1435
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c) | 699
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | 1998
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c | 979
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.c | 1115
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 278
55 files changed, 9606 insertions, 8931 deletions
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ad3bdba6bee..57703d5209d 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,5 +1,5 @@
-config IWLAGN
- tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn) "
+config IWLWIFI
+ tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
depends on PCI && MAC80211
select FW_LOADER
select NEW_LEDS
@@ -39,14 +39,14 @@ config IWLAGN
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The
- module will be called iwlagn.
+ module will be called iwlwifi.
menu "Debugging Options"
- depends on IWLAGN
+ depends on IWLWIFI
config IWLWIFI_DEBUG
- bool "Enable full debugging output in the iwlagn driver"
- depends on IWLAGN
+ bool "Enable full debugging output in the iwlwifi driver"
+ depends on IWLWIFI
---help---
This option will enable debug tracing output for the iwlwifi drivers
@@ -54,13 +54,13 @@ config IWLWIFI_DEBUG
control which debug output is sent to the kernel log by setting the
value in
- /sys/class/net/wlan0/device/debug_level
+ /sys/module/iwlwifi/parameters/debug
This entry will only exist if this option is enabled.
To set a value, simply echo an 8-byte hex value to the same file:
- % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+ % echo 0x43fff > /sys/module/iwlwifi/parameters/debug
You can find the list of debug mask values in:
drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -70,8 +70,8 @@ config IWLWIFI_DEBUG
any problems you may encounter.
config IWLWIFI_DEBUGFS
- bool "iwlagn debugfs support"
- depends on IWLAGN && MAC80211_DEBUGFS
+ bool "iwlwifi debugfs support"
+ depends on IWLWIFI && MAC80211_DEBUGFS
---help---
Enable creation of debugfs files for the iwlwifi drivers. This
is a low-impact option that allows getting insight into the
@@ -79,13 +79,13 @@ config IWLWIFI_DEBUGFS
config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
bool "Experimental uCode support"
- depends on IWLAGN && IWLWIFI_DEBUG
+ depends on IWLWIFI && IWLWIFI_DEBUG
---help---
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
bool "iwlwifi device access tracing"
- depends on IWLAGN
+ depends on IWLWIFI
depends on EVENT_TRACING
help
Say Y here to trace all commands, including TX frames and IO
@@ -104,27 +104,10 @@ endmenu
config IWLWIFI_DEVICE_SVTOOL
bool "iwlwifi device svtool support"
- depends on IWLAGN
+ depends on IWLWIFI
select NL80211_TESTMODE
help
This option enables the svtool support for iwlwifi device through
NL80211_TESTMODE. svtool is a software validation tool that runs in
the user space and interacts with the device in the kernel space
through the generic netlink message via NL80211_TESTMODE channel.
-
-config IWL_P2P
- bool "iwlwifi experimental P2P support"
- depends on IWLAGN
- help
- This option enables experimental P2P support for some devices
- based on microcode support. Since P2P support is still under
- development, this option may even enable it for some devices
- now that turn out to not support it in the future due to
- microcode restrictions.
-
- To determine if your microcode supports the experimental P2P
- offered by this option, check if the driver advertises AP
- support when it is loaded.
-
- Say Y only if you want to experiment with P2P.
-
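[Note: the updated Kconfig help above points users at /sys/module/iwlwifi/parameters/debug instead of the old per-device debug_level file. For orientation only, the following sketch shows how a module parameter typically ends up under that sysfs path; it is not part of this patch and the variable name is hypothetical.]

/*
 * Illustrative sketch, not taken from this patch: module_param_named()
 * is what makes /sys/module/iwlwifi/parameters/debug appear. The
 * variable name below is made up; the real bit values are listed in
 * iwl-debug.h.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int iwlwifi_debug_level;	/* debug bitmask, see iwl-debug.h */

module_param_named(debug, iwlwifi_debug_level, uint, 0644);
MODULE_PARM_DESC(debug, "debug output mask (bit values listed in iwl-debug.h)");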
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 48ab9142af3..c73e5ed8db5 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,24 +1,24 @@
-# AGN
-obj-$(CONFIG_IWLAGN) += iwlagn.o
-iwlagn-objs := iwl-agn.o iwl-agn-rs.o
-iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
-iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
-iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
+# WIFI
+obj-$(CONFIG_IWLWIFI) += iwlwifi.o
+iwlwifi-objs := iwl-agn.o iwl-agn-rs.o
+iwlwifi-objs += iwl-agn-ucode.o iwl-agn-tx.o
+iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
+iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
-iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-power.o
-iwlagn-objs += iwl-rx.o iwl-sta.o
-iwlagn-objs += iwl-scan.o iwl-led.o
-iwlagn-objs += iwl-agn-rxon.o
-iwlagn-objs += iwl-5000.o
-iwlagn-objs += iwl-6000.o
-iwlagn-objs += iwl-1000.o
-iwlagn-objs += iwl-2000.o
-iwlagn-objs += iwl-pci.o
-iwlagn-objs += iwl-trans.o iwl-trans-rx-pcie.o iwl-trans-tx-pcie.o
+iwlwifi-objs += iwl-core.o iwl-eeprom.o iwl-power.o
+iwlwifi-objs += iwl-scan.o iwl-led.o
+iwlwifi-objs += iwl-agn-rxon.o
+iwlwifi-objs += iwl-5000.o
+iwlwifi-objs += iwl-6000.o
+iwlwifi-objs += iwl-1000.o
+iwlwifi-objs += iwl-2000.o
+iwlwifi-objs += iwl-pci.o
+iwlwifi-objs += iwl-trans.o
+iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
-iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlagn-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
+iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 01b49eb8c8e..e12b48c2cff 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -30,7 +30,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -40,14 +39,18 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-sta.h"
#include "iwl-agn.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
+#include "iwl-shared.h"
+#include "iwl-cfg.h"
/* Highest firmware API version supported */
-#define IWL1000_UCODE_API_MAX 5
-#define IWL100_UCODE_API_MAX 5
+#define IWL1000_UCODE_API_MAX 6
+#define IWL100_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL1000_UCODE_API_OK 5
+#define IWL100_UCODE_API_OK 5
/* Lowest firmware API version supported */
#define IWL1000_UCODE_API_MIN 1
@@ -73,21 +76,21 @@
static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
- priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
+ hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
/* NIC configuration for 1000 series */
static void iwl1000_nic_config(struct iwl_priv *priv)
{
/* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
/* Setting digital SVR for 1000 card to 1.32V */
/* locking is acquired in iwl_set_bits_mask_prph() function */
- iwl_set_bits_mask_prph(priv, APMG_DIGITAL_SVR_REG,
+ iwl_set_bits_mask_prph(bus(priv), APMG_DIGITAL_SVR_REG,
APMG_SVR_DIGITAL_VOLTAGE_1_32,
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
@@ -124,44 +127,36 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
+ hw_params(priv).rx_chains_num = 1;
else
- priv->hw_params.rx_chains_num =
+ hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl1000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl1000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl1000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_TX_IQ_PERD) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
-
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -191,7 +186,6 @@ static struct iwl_base_params iwl1000_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
.shadow_ram_support = false,
.led_compensation = 51,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
@@ -201,12 +195,13 @@ static struct iwl_base_params iwl1000_base_params = {
static struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
- .smps_mode = IEEE80211_SMPS_STATIC,
+ .smps_mode = IEEE80211_SMPS_DYNAMIC,
};
#define IWL_DEVICE_1000 \
.fw_name_pre = IWL1000_FW_PRE, \
.ucode_api_max = IWL1000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL1000_UCODE_API_OK, \
.ucode_api_min = IWL1000_UCODE_API_MIN, \
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
@@ -228,6 +223,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
#define IWL_DEVICE_100 \
.fw_name_pre = IWL100_FW_PRE, \
.ucode_api_max = IWL100_UCODE_API_MAX, \
+ .ucode_api_ok = IWL100_UCODE_API_OK, \
.ucode_api_min = IWL100_UCODE_API_MIN, \
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
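[Note: this file, like the other device files below, switches from priv->hw_params.* to a hw_params(priv) accessor pulled in via the new iwl-shared.h, which is not shown in this section. Judging by the priv->shrd->lock usage that appears in iwl-5000.c further down, the accessor plausibly dereferences a shared-state struct hung off priv->shrd. The sketch below assumes that shape; the struct layout and member set are guesses, not the real iwl-shared.h.]

#include <linux/types.h>

/* Sketch only: the real definitions live in iwl-shared.h. */
struct iwl_hw_params {
	u8  max_txq_num;
	u32 max_data_size;
	u32 max_inst_size;
	/* ... */
};

struct iwl_shared {
	struct iwl_hw_params hw_params;
};

struct iwl_priv {
	struct iwl_shared *shrd;
	/* ... */
};

#define hw_params(priv)	((priv)->shrd->hw_params)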
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 0e13f0bb2e1..79431977a96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -30,7 +30,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -40,17 +39,22 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-sta.h"
#include "iwl-agn.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
-#include "iwl-6000-hw.h"
+#include "iwl-shared.h"
+#include "iwl-cfg.h"
/* Highest firmware API version supported */
-#define IWL2030_UCODE_API_MAX 5
-#define IWL2000_UCODE_API_MAX 5
-#define IWL105_UCODE_API_MAX 5
-#define IWL135_UCODE_API_MAX 5
+#define IWL2030_UCODE_API_MAX 6
+#define IWL2000_UCODE_API_MAX 6
+#define IWL105_UCODE_API_MAX 6
+#define IWL135_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL2030_UCODE_API_OK 5
+#define IWL2000_UCODE_API_OK 5
+#define IWL105_UCODE_API_OK 5
+#define IWL135_UCODE_API_OK 5
/* Lowest firmware API version supported */
#define IWL2030_UCODE_API_MIN 5
@@ -68,13 +72,13 @@
#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
#define IWL135_FW_PRE "iwlwifi-135-"
-#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE #api ".ucode"
+#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
- priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
+ hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
/* NIC configuration for 2000 series */
@@ -83,7 +87,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
iwl_rf_config(priv);
if (priv->cfg->iq_invert)
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
@@ -119,45 +123,37 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
+ hw_params(priv).rx_chains_num = 1;
else
- priv->hw_params.rx_chains_num =
+ hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl2000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl2000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl2000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
+ hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
if (priv->cfg->need_temp_offset_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
-
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -175,7 +171,7 @@ static struct iwl_lib_ops iwl2000_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
@@ -196,7 +192,7 @@ static struct iwl_lib_ops iwl2030_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
@@ -209,7 +205,6 @@ static struct iwl_base_params iwl2000_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -217,6 +212,7 @@ static struct iwl_base_params iwl2000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = true,
+ .hd_v2 = true,
};
@@ -228,7 +224,6 @@ static struct iwl_base_params iwl2030_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -236,6 +231,7 @@ static struct iwl_base_params iwl2030_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = true,
+ .hd_v2 = true,
};
static struct iwl_ht_params iwl2000_ht_params = {
@@ -256,6 +252,7 @@ static struct iwl_bt_params iwl2030_bt_params = {
#define IWL_DEVICE_2000 \
.fw_name_pre = IWL2000_FW_PRE, \
.ucode_api_max = IWL2000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL2000_UCODE_API_OK, \
.ucode_api_min = IWL2000_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
@@ -263,6 +260,7 @@ static struct iwl_bt_params iwl2030_bt_params = {
.base_params = &iwl2000_base_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
+ .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.iq_invert = true \
@@ -277,9 +275,16 @@ struct iwl_cfg iwl2000_2bg_cfg = {
IWL_DEVICE_2000,
};
+struct iwl_cfg iwl2000_2bgn_d_cfg = {
+ .name = "2000D Series 2x2 BGN",
+ IWL_DEVICE_2000,
+ .ht_params = &iwl2000_ht_params,
+};
+
#define IWL_DEVICE_2030 \
.fw_name_pre = IWL2030_FW_PRE, \
.ucode_api_max = IWL2030_UCODE_API_MAX, \
+ .ucode_api_ok = IWL2030_UCODE_API_OK, \
.ucode_api_min = IWL2030_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
@@ -288,6 +293,7 @@ struct iwl_cfg iwl2000_2bg_cfg = {
.bt_params = &iwl2030_bt_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
+ .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true, \
.iq_invert = true \
@@ -306,6 +312,7 @@ struct iwl_cfg iwl2030_2bg_cfg = {
#define IWL_DEVICE_105 \
.fw_name_pre = IWL105_FW_PRE, \
.ucode_api_max = IWL105_UCODE_API_MAX, \
+ .ucode_api_ok = IWL105_UCODE_API_OK, \
.ucode_api_min = IWL105_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
@@ -313,6 +320,7 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.base_params = &iwl2000_base_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
+ .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true, \
.rx_with_siso_diversity = true, \
@@ -329,9 +337,16 @@ struct iwl_cfg iwl105_bgn_cfg = {
.ht_params = &iwl2000_ht_params,
};
+struct iwl_cfg iwl105_bgn_d_cfg = {
+ .name = "105D Series 1x1 BGN",
+ IWL_DEVICE_105,
+ .ht_params = &iwl2000_ht_params,
+};
+
#define IWL_DEVICE_135 \
.fw_name_pre = IWL135_FW_PRE, \
.ucode_api_max = IWL135_UCODE_API_MAX, \
+ .ucode_api_ok = IWL135_UCODE_API_OK, \
.ucode_api_min = IWL135_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
@@ -340,6 +355,7 @@ struct iwl_cfg iwl105_bgn_cfg = {
.bt_params = &iwl2030_bt_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
+ .temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true, \
.rx_with_siso_diversity = true, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c95cefd529d..c511c98a89a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -31,7 +31,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -41,12 +40,11 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-sta.h"
-#include "iwl-helpers.h"
#include "iwl-agn.h"
#include "iwl-agn-hw.h"
-#include "iwl-5000-hw.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
+#include "iwl-cfg.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
@@ -69,27 +67,27 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
iwl_rf_config(priv);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
/* W/A : NIC is stuck in a reset state after Early PCIe power off
* (PCIe power is lost before PERST# is asserted),
* causing ME FW to lose ownership and not being able to obtain it back.
*/
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ iwl_set_bits_mask_prph(bus(priv), APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
- .min_nrg_cck = 95,
+ .min_nrg_cck = 100,
.max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 90,
.auto_corr_min_ofdm_mrc = 170,
- .auto_corr_min_ofdm_x1 = 120,
- .auto_corr_min_ofdm_mrc_x1 = 240,
+ .auto_corr_min_ofdm_x1 = 105,
+ .auto_corr_min_ofdm_mrc_x1 = 220,
.auto_corr_max_ofdm = 120,
.auto_corr_max_ofdm_mrc = 210,
@@ -98,10 +96,10 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.auto_corr_min_cck = 125,
.auto_corr_max_cck = 200,
- .auto_corr_min_cck_mrc = 170,
+ .auto_corr_min_cck_mrc = 200,
.auto_corr_max_cck_mrc = 400,
- .nrg_th_cck = 95,
- .nrg_th_ofdm = 95,
+ .nrg_th_cck = 100,
+ .nrg_th_ofdm = 100,
.barker_corr_th_min = 190,
.barker_corr_th_min_mrc = 390,
@@ -134,19 +132,34 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
.nrg_th_cca = 62,
};
+#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
+
+static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+{
+ u16 temperature, voltage;
+ __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
+ EEPROM_KELVIN_TEMPERATURE);
+
+ temperature = le16_to_cpu(temp_calib[0]);
+ voltage = le16_to_cpu(temp_calib[1]);
+
+ /* offset = temp - volt / coeff */
+ return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
+}
+
static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
iwl_temp_calib_to_offset(priv);
- priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
+ hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
}
static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
}
static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
@@ -156,39 +169,32 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl5000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl5000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl5000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_TX_IQ_PERD) |
BIT(IWL_CALIB_BASE_BAND);
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
-
return 0;
}
@@ -199,38 +205,31 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl5150_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl5150_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl5150_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
-
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -252,7 +251,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
{
/*
* MULTI-FIXME
- * See iwl_mac_channel_switch.
+ * See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl5000_channel_switch_cmd cmd;
@@ -315,7 +314,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
return -EFAULT;
}
- return trans_send_cmd(&priv->trans, &hcmd);
+ return iwl_trans_send_cmd(trans(priv), &hcmd);
}
static struct iwl_lib_ops iwl5000_lib = {
@@ -360,7 +359,6 @@ static struct iwl_base_params iwl5000_base_params = {
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
.led_compensation = 51,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
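[Note: the iwl_temp_calib_to_offset() helper added earlier in this file computes offset = temperature - voltage / coeff with coeff fixed at -5 (IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); iwl5150_set_ct_threshold() then subtracts that offset from the Kelvin CT-kill threshold before scaling by the same coefficient. A stand-alone check of the arithmetic, using made-up calibration words rather than real EEPROM data:]

#include <stdio.h>

int main(void)
{
	const int coeff = -5;			/* IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF */
	int temperature = 300, voltage = 25;	/* illustrative values only */
	int offset = temperature - voltage / coeff;

	printf("offset = %d\n", offset);	/* 300 - (25 / -5) = 305 */
	return 0;
}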
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
deleted file mode 100644
index b27986e57c9..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-6000-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- */
-
-#ifndef __iwl_6000_hw_h__
-#define __iwl_6000_hw_h__
-
-#define IWL60_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL60_RTC_INST_UPPER_BOUND (0x040000)
-#define IWL60_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL60_RTC_DATA_UPPER_BOUND (0x814000)
-#define IWL60_RTC_INST_SIZE \
- (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND)
-#define IWL60_RTC_DATA_SIZE \
- (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND)
-
-#endif /* __iwl_6000_hw_h__ */
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 973d1972e8c..c840c78278d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -30,7 +30,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -40,17 +39,19 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-sta.h"
#include "iwl-agn.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
-#include "iwl-6000-hw.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
+#include "iwl-cfg.h"
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 4
#define IWL6050_UCODE_API_MAX 5
-#define IWL6000G2_UCODE_API_MAX 5
+#define IWL6000G2_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL6000G2_UCODE_API_OK 5
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
@@ -72,15 +73,15 @@
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
- priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+ hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
+ hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
static void iwl6050_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (iwlagn_eeprom_calib_version(priv) >= 6)
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
@@ -88,9 +89,9 @@ static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (iwlagn_eeprom_calib_version(priv) >= 6)
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
- iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_6050_1x2);
}
@@ -102,7 +103,7 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
/* no locking required for register write */
if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
/* 2x2 IPA phy type */
- iwl_write32(priv, CSR_GP_DRIVER_REG,
+ iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
}
/* do additional nic configuration if needed */
@@ -111,7 +112,7 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
}
static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
- .min_nrg_cck = 97,
+ .min_nrg_cck = 110,
.max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 80,
.auto_corr_min_ofdm_mrc = 128,
@@ -127,11 +128,11 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
.auto_corr_max_cck = 175,
.auto_corr_min_cck_mrc = 160,
.auto_corr_max_cck_mrc = 310,
- .nrg_th_cck = 97,
- .nrg_th_ofdm = 100,
+ .nrg_th_cck = 110,
+ .nrg_th_ofdm = 110,
.barker_corr_th_min = 190,
- .barker_corr_th_min_mrc = 390,
+ .barker_corr_th_min_mrc = 336,
.nrg_th_cca = 62,
};
@@ -142,45 +143,38 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+ hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
+ hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
+ hw_params(priv).rx_chains_num = 1;
else
- priv->hw_params.rx_chains_num =
+ hw_params(priv).rx_chains_num =
num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
iwl6000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- priv->hw_params.sens = &iwl6000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ hw_params(priv).sens = &iwl6000_sensitivity;
+ hw_params(priv).calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
+ hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
if (priv->cfg->need_temp_offset_calib)
- priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
-
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -190,7 +184,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
{
/*
* MULTI-FIXME
- * See iwl_mac_channel_switch.
+ * See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl6000_channel_switch_cmd cmd;
@@ -253,7 +247,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
return -EFAULT;
}
- return trans_send_cmd(&priv->trans, &hcmd);
+ return iwl_trans_send_cmd(trans(priv), &hcmd);
}
static struct iwl_lib_ops iwl6000_lib = {
@@ -270,7 +264,7 @@ static struct iwl_lib_ops iwl6000_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
@@ -292,7 +286,7 @@ static struct iwl_lib_ops iwl6030_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
},
.temperature = iwlagn_temperature,
};
@@ -305,7 +299,6 @@ static struct iwl_base_params iwl6000_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -323,7 +316,6 @@ static struct iwl_base_params iwl6050_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -340,7 +332,6 @@ static struct iwl_base_params iwl6000_g2_base_params = {
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
- .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -365,8 +356,9 @@ static struct iwl_bt_params iwl6000_bt_params = {
};
#define IWL_DEVICE_6005 \
- .fw_name_pre = IWL6005_FW_PRE, \
+ .fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
@@ -392,9 +384,22 @@ struct iwl_cfg iwl6005_2bg_cfg = {
IWL_DEVICE_6005,
};
+struct iwl_cfg iwl6005_2agn_sff_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
+struct iwl_cfg iwl6005_2agn_d_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
#define IWL_DEVICE_6030 \
- .fw_name_pre = IWL6030_FW_PRE, \
+ .fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 72d6297602b..03bac48558b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -93,12 +93,12 @@ int iwl_send_calib_results(struct iwl_priv *priv)
};
for (i = 0; i < IWL_CALIB_MAX; i++) {
- if ((BIT(i) & priv->hw_params.calib_init_cfg) &&
+ if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
priv->calib_results[i].buf) {
hcmd.len[0] = priv->calib_results[i].buf_len;
hcmd.data[0] = priv->calib_results[i].buf;
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- ret = trans_send_cmd(&priv->trans, &hcmd);
+ ret = iwl_trans_send_cmd(trans(priv), &hcmd);
if (ret) {
IWL_ERR(priv, "Error %d iteration %d\n",
ret, i);
@@ -174,7 +174,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
data = &(priv->sensitivity_data);
@@ -357,7 +357,7 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
data = &(priv->sensitivity_data);
@@ -484,7 +484,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
sizeof(u16)*HD_TABLE_SIZE);
- return trans_send_cmd(&priv->trans, &cmd_out);
+ return iwl_trans_send_cmd(trans(priv), &cmd_out);
}
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
@@ -505,28 +505,53 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
- cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
- HD_INA_NON_SQUARE_DET_OFDM_DATA;
- cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
- HD_INA_NON_SQUARE_DET_CCK_DATA;
- cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
- HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA;
- cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
- HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA;
- cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
- HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA;
- cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
- HD_OFDM_NON_SQUARE_DET_SLOPE_DATA;
- cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
- HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA;
- cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
- HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA;
- cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
- HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA;
- cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
- HD_CCK_NON_SQUARE_DET_SLOPE_DATA;
- cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
- HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA;
+ if (priv->cfg->base_params->hd_v2) {
+ cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
+ HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
+ cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
+ HD_INA_NON_SQUARE_DET_CCK_DATA_V2;
+ cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
+ HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+ HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+ HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
+ HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
+ HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2;
+ } else {
+ cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
+ HD_INA_NON_SQUARE_DET_OFDM_DATA_V1;
+ cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
+ HD_INA_NON_SQUARE_DET_CCK_DATA_V1;
+ cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
+ HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1;
+ cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
+ HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+ HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+ HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
+ HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1;
+ cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
+ HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1;
+ }
/* Update uCode's "work" table, and copy it to DSP */
cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
@@ -548,7 +573,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
&(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
- return trans_send_cmd(&priv->trans, &cmd_out);
+ return iwl_trans_send_cmd(trans(priv), &cmd_out);
}
void iwl_init_sensitivity(struct iwl_priv *priv)
@@ -556,7 +581,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
int ret = 0;
int i;
struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
if (priv->disable_sens_cal)
return;
@@ -633,13 +658,13 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
rx_info = &priv->statistics.rx_non_phy;
ofdm = &priv->statistics.rx_ofdm;
cck = &priv->statistics.rx_cck;
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return;
}
@@ -663,7 +688,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
statis.beacon_energy_c =
le32_to_cpu(rx_info->beacon_energy_c);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
@@ -741,12 +766,9 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
u8 first_chain;
u16 i = 0;
- average_sig[0] = data->chain_signal_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[1] = data->chain_signal_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[2] = data->chain_signal_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[0] = data->chain_signal_a / IWL_CAL_NUM_BEACONS;
+ average_sig[1] = data->chain_signal_b / IWL_CAL_NUM_BEACONS;
+ average_sig[2] = data->chain_signal_c / IWL_CAL_NUM_BEACONS;
if (average_sig[0] >= average_sig[1]) {
max_average_sig = average_sig[0];
@@ -796,21 +818,21 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
- active_chains &= priv->hw_params.valid_rx_ant;
+ active_chains &= hw_params(priv).valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
* priv->hw_setting.valid_tx_ant */
u8 ant_msk = (1 << i);
- if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ if (!(hw_params(priv).valid_tx_ant & ant_msk))
continue;
num_tx_chains++;
if (data->disconn_array[i] == 0)
/* there is a Tx antenna connected */
break;
- if (num_tx_chains == priv->hw_params.tx_chains_num &&
+ if (num_tx_chains == hw_params(priv).tx_chains_num &&
data->disconn_array[i]) {
/*
* If all chains are disconnected
@@ -827,12 +849,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
}
}
- if (active_chains != priv->hw_params.valid_rx_ant &&
+ if (active_chains != hw_params(priv).valid_rx_ant &&
active_chains != priv->chain_noise_data.active_chains)
IWL_DEBUG_CALIB(priv,
"Detected that not all antennas are connected! "
"Connected: %#x, valid: %#x.\n",
- active_chains, priv->hw_params.valid_rx_ant);
+ active_chains,
+ hw_params(priv).valid_rx_ant);
/* Save for use within RXON, TX, SCAN commands, etc. */
data->active_chains = active_chains;
@@ -892,7 +915,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
priv->phy_calib_chain_noise_gain_cmd);
cmd.delta_gain_1 = data->delta_gain_code[1];
cmd.delta_gain_2 = data->delta_gain_code[2];
- trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD,
+ iwl_trans_send_cmd_pdu(trans(priv), REPLY_PHY_CALIBRATION_CMD,
CMD_ASYNC, sizeof(cmd), &cmd);
data->radio_write = 1;
@@ -950,13 +973,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
rx_info = &priv->statistics.rx_non_phy;
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return;
}
@@ -971,7 +994,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
rxon_chnum, rxon_band24);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return;
}
@@ -990,7 +1013,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
data->beacon_count++;
@@ -1012,8 +1035,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
/* If this is the "chain_noise_num_beacons", determine:
* 1) Disconnected antennas (using signal strengths)
* 2) Differential gain (using silence noise) to balance receivers */
- if (data->beacon_count !=
- priv->cfg->base_params->chain_noise_num_beacons)
+ if (data->beacon_count != IWL_CAL_NUM_BEACONS)
return;
/* Analyze signal for disconnected antenna */
@@ -1021,7 +1043,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
priv->cfg->bt_params->advanced_bt_coexist) {
/* Disable disconnected antenna algorithm for advanced
bt coex, assuming valid antennas are connected */
- data->active_chains = priv->hw_params.valid_rx_ant;
+ data->active_chains = hw_params(priv).valid_rx_ant;
for (i = 0; i < NUM_RX_CHAINS; i++)
if (!(data->active_chains & (1<<i)))
data->disconn_array[i] = 1;
@@ -1029,12 +1051,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
iwl_find_disconn_antenna(priv, average_sig, data);
/* Analyze noise for rx balance */
- average_noise[0] = data->chain_noise_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[1] = data->chain_noise_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[2] = data->chain_noise_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_noise[0] = data->chain_noise_a / IWL_CAL_NUM_BEACONS;
+ average_noise[1] = data->chain_noise_b / IWL_CAL_NUM_BEACONS;
+ average_noise[2] = data->chain_noise_c / IWL_CAL_NUM_BEACONS;
for (i = 0; i < NUM_RX_CHAINS; i++) {
if (!(data->disconn_array[i]) &&
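[Note: the hd_v2 branch added in iwl_enhance_sensitivity_write() repeats the same eleven enhance_table assignments twice, differing only in the _V1/_V2 suffix of the HD_* constants. A more data-driven equivalent, shown here purely as a sketch of an alternative shape and not what the patch does, would keep one {index, v1, v2} row per entry:]

#include <linux/types.h>

/* Sketch only: rows would pair each HD_*_INDEX with its _DATA_V1/_V2 values. */
struct hd_entry {
	int index;
	u16 v1;
	u16 v2;
};

static void fill_enhance_table(u16 *table, const struct hd_entry *rows,
			       int n, bool hd_v2)
{
	int i;

	for (i = 0; i < n; i++)
		table[rows[i].index] = hd_v2 ? rows[i].v2 : rows[i].v1;
}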
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
deleted file mode 100644
index b8347db850e..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-agn.h"
-#include "iwl-io.h"
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-int iwl_eeprom_check_version(struct iwl_priv *priv)
-{
- u16 eeprom_ver;
- u16 calib_ver;
-
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwlagn_eeprom_calib_version(priv);
-
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
- goto err;
-
- IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- eeprom_ver, calib_ver);
-
- return 0;
-err:
- IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
- "CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
- return -EINVAL;
-
-}
-
-int iwl_eeprom_check_sku(struct iwl_priv *priv)
-{
- u16 radio_cfg;
-
- if (!priv->cfg->sku) {
- /* not using sku overwrite */
- priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
- !priv->cfg->ht_params) {
- IWL_ERR(priv, "Invalid 11n configuration\n");
- return -EINVAL;
- }
- }
- if (!priv->cfg->sku) {
- IWL_ERR(priv, "Invalid device sku\n");
- return -EINVAL;
- }
-
- IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
-
- if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
- /* not using .cfg overwrite */
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
- priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
- priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
- if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
- IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
- priv->cfg->valid_tx_ant,
- priv->cfg->valid_rx_ant);
- return -EINVAL;
- }
- IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
- priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
- }
- /*
- * For some special cases the EEPROM may not reflect the correct
- * antenna setting, so the valid tx/rx antennas can be overridden
- * from .cfg.
- */
- return 0;
-}
-
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
-{
- const u8 *addr = iwl_eeprom_query_addr(priv,
- EEPROM_MAC_ADDRESS);
- memcpy(mac, addr, ETH_ALEN);
-}
-
-/**
- * iwl_get_max_txpower_avg - get the highest tx power from all chains.
- * Find the highest tx power from any valid chain for the given channel.
- */
-static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
- struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
- int element, s8 *max_txpower_in_half_dbm)
-{
- s8 max_txpower_avg = 0; /* (dBm) */
-
- /* Take the highest tx power from any valid chains */
- if ((priv->cfg->valid_tx_ant & ANT_A) &&
- (enhanced_txpower[element].chain_a_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_a_max;
- if ((priv->cfg->valid_tx_ant & ANT_B) &&
- (enhanced_txpower[element].chain_b_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_b_max;
- if ((priv->cfg->valid_tx_ant & ANT_C) &&
- (enhanced_txpower[element].chain_c_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_c_max;
- if (((priv->cfg->valid_tx_ant == ANT_AB) |
- (priv->cfg->valid_tx_ant == ANT_BC) |
- (priv->cfg->valid_tx_ant == ANT_AC)) &&
- (enhanced_txpower[element].mimo2_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].mimo2_max;
- if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
- (enhanced_txpower[element].mimo3_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].mimo3_max;
-
- /*
- * max. tx power in EEPROM is in 1/2 dBm format
- * convert from 1/2 dBm to dBm (round-up convert)
- * but we also do not want to lose 1/2 dBm resolution, which
- * will impact performance
- */
- *max_txpower_in_half_dbm = max_txpower_avg;
- return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
-}
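The return expression above packs the round-up from half-dBm to whole dBm into one line. A self-contained sketch of the same conversion, with a hypothetical helper name:

#include <stdio.h>

/* Hypothetical helper: convert a power value in half-dBm units to whole
 * dBm, rounding halves up -- same arithmetic as the code above. */
static int half_dbm_to_dbm_round_up(int half_dbm)
{
	return (half_dbm & 0x01) + (half_dbm >> 1);
}

int main(void)
{
	/* 31 half-dBm = 15.5 dBm -> rounds up to 16; 30 half-dBm -> 15 */
	printf("%d %d\n", half_dbm_to_dbm_round_up(31),
	       half_dbm_to_dbm_round_up(30));
	return 0;
}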
-
-static void
-iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
- struct iwl_eeprom_enhanced_txpwr *txp,
- s8 max_txpower_avg)
-{
- int ch_idx;
- bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
- enum ieee80211_band band;
-
- band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
- IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
-
- for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
- struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
-
- /* update matching channel or from common data only */
- if (txp->channel != 0 && ch_info->channel != txp->channel)
- continue;
-
- /* update matching band only */
- if (band != ch_info->band)
- continue;
-
- if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
- ch_info->max_power_avg = max_txpower_avg;
- ch_info->curr_txpow = max_txpower_avg;
- ch_info->scan_power = max_txpower_avg;
- }
-
- if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
- ch_info->ht40_max_power_avg = max_txpower_avg;
- }
-}
-
-#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
-#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
-#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
-
-#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
- ? # x " " : "")
-
-void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
-{
- struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
- int idx, entries;
- __le16 *txp_len;
- s8 max_txp_avg, max_txp_avg_halfdbm;
-
- BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
-
- /* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
- entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
-
- txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
-
- for (idx = 0; idx < entries; idx++) {
- txp = &txp_array[idx];
- /* skip invalid entries */
- if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
- continue;
-
- IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
- (txp->channel && (txp->flags &
- IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
- "Common " : (txp->channel) ?
- "Channel" : "Common",
- (txp->channel),
- TXP_CHECK_AND_PRINT(VALID),
- TXP_CHECK_AND_PRINT(BAND_52G),
- TXP_CHECK_AND_PRINT(OFDM),
- TXP_CHECK_AND_PRINT(40MHZ),
- TXP_CHECK_AND_PRINT(HT_AP),
- TXP_CHECK_AND_PRINT(RES1),
- TXP_CHECK_AND_PRINT(RES2),
- TXP_CHECK_AND_PRINT(COMMON_TYPE),
- txp->flags);
- IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
- "chain_B: 0X%02x chain_C: 0X%02x\n",
- txp->chain_a_max, txp->chain_b_max,
- txp->chain_c_max);
- IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
- "MIMO3: 0x%02x High 20_on_40: 0x%02x "
- "Low 20_on_40: 0x%02x\n",
- txp->mimo2_max, txp->mimo3_max,
- ((txp->delta_20_in_40 & 0xf0) >> 4),
- (txp->delta_20_in_40 & 0x0f));
-
- max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
- &max_txp_avg_halfdbm);
-
- /*
- * Update the user limit values to the highest
- * power supported by any channel
- */
- if (max_txp_avg > priv->tx_power_user_lmt)
- priv->tx_power_user_lmt = max_txp_avg;
- if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
- priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
-
- iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
- }
-}
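The entry count in iwlcore_eeprom_enhanced_txpower() above comes from an EEPROM length stored in 16-bit words and an 8-byte entry size (enforced by the BUILD_BUG_ON). A small standalone sketch of that conversion; the names are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: length is a count of 16-bit words, each table entry
 * is 8 bytes, so the entry count is words * 2 / 8. */
#define EXAMPLE_TXP_ENTRY_LEN 8

static unsigned int txp_words_to_entries(uint16_t len_in_words)
{
	return len_in_words * 2 / EXAMPLE_TXP_ENTRY_LEN;
}

int main(void)
{
	/* e.g. a length of 24 words = 48 bytes -> 6 entries */
	printf("%u\n", txp_words_to_entries(24));
	return 0;
}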
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 0e5b842529c..123ef5e129d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -78,10 +78,23 @@
#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
IWLAGN_RTC_DATA_LOWER_BOUND)
+#define IWL60_RTC_INST_LOWER_BOUND (0x000000)
+#define IWL60_RTC_INST_UPPER_BOUND (0x040000)
+#define IWL60_RTC_DATA_LOWER_BOUND (0x800000)
+#define IWL60_RTC_DATA_UPPER_BOUND (0x814000)
+#define IWL60_RTC_INST_SIZE \
+ (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND)
+#define IWL60_RTC_DATA_SIZE \
+ (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND)
+
/* RSSI to dBm */
#define IWLAGN_RSSI_OFFSET 44
-#define IWLAGN_DEFAULT_TX_RETRY 15
+#define IWLAGN_DEFAULT_TX_RETRY 15
+#define IWLAGN_MGMT_DFAULT_RETRY_LIMIT 3
+#define IWLAGN_RTS_DFAULT_RETRY_LIMIT 60
+#define IWLAGN_BAR_DFAULT_RETRY_LIMIT 60
+#define IWLAGN_LOW_RETRY_LIMIT 7
/* Limit range of txpower output target to be between these values */
#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
@@ -92,20 +105,7 @@
#define IWLAGN_CMD_FIFO_NUM 7
#define IWLAGN_NUM_QUEUES 20
-#define IWLAGN_NUM_AMPDU_QUEUES 10
-#define IWLAGN_FIRST_AMPDU_QUEUE 10
-
-/* Fixed (non-configurable) rx data from phy */
-
-/**
- * struct iwlagn_schedq_bc_tbl scheduler byte count table
- * base physical address provided by SCD_DRAM_BASE_ADDR
- * @tfd_offset 0-12 - tx command byte count
- * 12-16 - station index
- */
-struct iwlagn_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-} __packed;
-
+#define IWLAGN_NUM_AMPDU_QUEUES 9
+#define IWLAGN_FIRST_AMPDU_QUEUE 11
#endif /* __iwl_agn_hw_h__ */
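With the values introduced in this hunk, the AMPDU queues sit at the top of the queue range: indices 11 through 19 out of IWLAGN_NUM_QUEUES (20). A quick illustrative consistency check of that relationship (not driver code):

#include <assert.h>
#include <stdio.h>

/* Values taken from the hunk above; the check itself is only a sketch. */
#define IWLAGN_NUM_QUEUES		20
#define IWLAGN_NUM_AMPDU_QUEUES		9
#define IWLAGN_FIRST_AMPDU_QUEUE	11

int main(void)
{
	/* AMPDU queues occupy indices 11..19, i.e. the top of the range */
	assert(IWLAGN_FIRST_AMPDU_QUEUE + IWLAGN_NUM_AMPDU_QUEUES ==
	       IWLAGN_NUM_QUEUES);
	printf("AMPDU queues: %d..%d\n", IWLAGN_FIRST_AMPDU_QUEUE,
	       IWLAGN_NUM_QUEUES - 1);
	return 0;
}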
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 3bee0f119bc..1a52ed29f2d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -35,454 +35,10 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
-#include "iwl-sta.h"
#include "iwl-trans.h"
-
-static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
-{
- return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & MAX_SN;
-}
-
-static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_POSTPONE_DELAY:
- priv->reply_tx_stats.pp_delay++;
- break;
- case TX_STATUS_POSTPONE_FEW_BYTES:
- priv->reply_tx_stats.pp_few_bytes++;
- break;
- case TX_STATUS_POSTPONE_BT_PRIO:
- priv->reply_tx_stats.pp_bt_prio++;
- break;
- case TX_STATUS_POSTPONE_QUIET_PERIOD:
- priv->reply_tx_stats.pp_quiet_period++;
- break;
- case TX_STATUS_POSTPONE_CALC_TTAK:
- priv->reply_tx_stats.pp_calc_ttak++;
- break;
- case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
- priv->reply_tx_stats.int_crossed_retry++;
- break;
- case TX_STATUS_FAIL_SHORT_LIMIT:
- priv->reply_tx_stats.short_limit++;
- break;
- case TX_STATUS_FAIL_LONG_LIMIT:
- priv->reply_tx_stats.long_limit++;
- break;
- case TX_STATUS_FAIL_FIFO_UNDERRUN:
- priv->reply_tx_stats.fifo_underrun++;
- break;
- case TX_STATUS_FAIL_DRAIN_FLOW:
- priv->reply_tx_stats.drain_flow++;
- break;
- case TX_STATUS_FAIL_RFKILL_FLUSH:
- priv->reply_tx_stats.rfkill_flush++;
- break;
- case TX_STATUS_FAIL_LIFE_EXPIRE:
- priv->reply_tx_stats.life_expire++;
- break;
- case TX_STATUS_FAIL_DEST_PS:
- priv->reply_tx_stats.dest_ps++;
- break;
- case TX_STATUS_FAIL_HOST_ABORTED:
- priv->reply_tx_stats.host_abort++;
- break;
- case TX_STATUS_FAIL_BT_RETRY:
- priv->reply_tx_stats.bt_retry++;
- break;
- case TX_STATUS_FAIL_STA_INVALID:
- priv->reply_tx_stats.sta_invalid++;
- break;
- case TX_STATUS_FAIL_FRAG_DROPPED:
- priv->reply_tx_stats.frag_drop++;
- break;
- case TX_STATUS_FAIL_TID_DISABLE:
- priv->reply_tx_stats.tid_disable++;
- break;
- case TX_STATUS_FAIL_FIFO_FLUSHED:
- priv->reply_tx_stats.fifo_flush++;
- break;
- case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
- priv->reply_tx_stats.insuff_cf_poll++;
- break;
- case TX_STATUS_FAIL_PASSIVE_NO_RX:
- priv->reply_tx_stats.fail_hw_drop++;
- break;
- case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
- priv->reply_tx_stats.sta_color_mismatch++;
- break;
- default:
- priv->reply_tx_stats.unknown++;
- break;
- }
-}
-
-static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
-{
- status &= AGG_TX_STATUS_MSK;
-
- switch (status) {
- case AGG_TX_STATE_UNDERRUN_MSK:
- priv->reply_agg_tx_stats.underrun++;
- break;
- case AGG_TX_STATE_BT_PRIO_MSK:
- priv->reply_agg_tx_stats.bt_prio++;
- break;
- case AGG_TX_STATE_FEW_BYTES_MSK:
- priv->reply_agg_tx_stats.few_bytes++;
- break;
- case AGG_TX_STATE_ABORT_MSK:
- priv->reply_agg_tx_stats.abort++;
- break;
- case AGG_TX_STATE_LAST_SENT_TTL_MSK:
- priv->reply_agg_tx_stats.last_sent_ttl++;
- break;
- case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
- priv->reply_agg_tx_stats.last_sent_try++;
- break;
- case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
- priv->reply_agg_tx_stats.last_sent_bt_kill++;
- break;
- case AGG_TX_STATE_SCD_QUERY_MSK:
- priv->reply_agg_tx_stats.scd_query++;
- break;
- case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
- priv->reply_agg_tx_stats.bad_crc32++;
- break;
- case AGG_TX_STATE_RESPONSE_MSK:
- priv->reply_agg_tx_stats.response++;
- break;
- case AGG_TX_STATE_DUMP_TX_MSK:
- priv->reply_agg_tx_stats.dump_tx++;
- break;
- case AGG_TX_STATE_DELAY_TX_MSK:
- priv->reply_agg_tx_stats.delay_tx++;
- break;
- default:
- priv->reply_agg_tx_stats.unknown++;
- break;
- }
-}
-
-static void iwlagn_set_tx_status(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_rxon_context *ctx,
- struct iwlagn_tx_resp *tx_resp,
- int txq_id, bool is_agg)
-{
- u16 status = le16_to_cpu(tx_resp->status.status);
-
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- if (is_agg)
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
- info);
- if (!iwl_is_tx_success(status))
- iwlagn_count_tx_err_status(priv, status);
-
- if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
- iwl_is_associated_ctx(ctx) && ctx->vif &&
- ctx->vif->type == NL80211_IFTYPE_STATION) {
- ctx->last_tx_rejected = true;
- iwl_stop_queue(priv, &priv->txq[txq_id]);
- }
-
- IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
- "0x%x retries %d\n",
- txq_id,
- iwl_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
-
-const char *iwl_get_agg_tx_fail_reason(u16 status)
-{
- status &= AGG_TX_STATUS_MSK;
- switch (status) {
- case AGG_TX_STATE_TRANSMITTED:
- return "SUCCESS";
- AGG_TX_STATE_FAIL(UNDERRUN_MSK);
- AGG_TX_STATE_FAIL(BT_PRIO_MSK);
- AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
- AGG_TX_STATE_FAIL(ABORT_MSK);
- AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
- AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
- AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
- AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
- AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
- AGG_TX_STATE_FAIL(RESPONSE_MSK);
- AGG_TX_STATE_FAIL(DUMP_TX_MSK);
- AGG_TX_STATE_FAIL(DELAY_TX_MSK);
- }
-
- return "UNKNOWN";
-}
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
-static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwlagn_tx_resp *tx_resp,
- int txq_id, u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = &tx_resp->status;
- struct ieee80211_hdr *hdr = NULL;
- int i, sh, idx;
- u16 seq;
-
- if (agg->wait_for_ba)
- IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- agg->bitmap = 0;
-
- /* # frames attempted by Tx command */
- if (agg->frame_count == 1) {
- struct iwl_tx_info *txb;
-
- /* Only one frame was attempted; no block-ack will arrive */
- idx = start_idx;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
- txb = &priv->txq[txq_id].txb[idx];
- iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
- txb->ctx, tx_resp, txq_id, true);
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
-
- /*
- * Start is the lowest frame sent. It may not be the first
- * frame in the batch; we figure this out dynamically during
- * the following loop.
- */
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx window */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_INDEX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status & AGG_TX_STATUS_MSK)
- iwlagn_count_agg_tx_err_status(priv, status);
-
- if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
- IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
- "try-count (0x%08x)\n",
- iwl_get_agg_tx_fail_reason(status),
- status & AGG_TX_STATUS_MSK,
- status & AGG_TX_TRY_MSK);
-
- hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
- if (!hdr) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
-
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n",
- idx, SEQ_TO_SN(sc),
- hdr->seq_ctrl);
- return -1;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
- i, idx, SEQ_TO_SN(sc));
-
- /*
- * sh -> how many frames ahead of the starting frame is
- * the current one?
- *
- * Note that all frames sent in the batch must be in a
- * 64-frame window, so this number should be in [0,63].
- * If outside of this window, then we've found a new
- * "first" frame in the batch and need to change start.
- */
- sh = idx - start;
-
- /*
- * If >= 64, out of window. start must be at the front
- * of the circular buffer, idx must be near the end of
- * the buffer, and idx is the new "first" frame. Shift
- * the indices around.
- */
- if (sh >= 64) {
- /* Shift bitmap by start - idx, wrapped */
- sh = 0x100 - idx + start;
- bitmap = bitmap << sh;
- /* Now idx is the new start so sh = 0 */
- sh = 0;
- start = idx;
- /*
- * If <= -64 then wraps the 256-pkt circular buffer
- * (e.g., start = 255 and idx = 0, sh should be 1)
- */
- } else if (sh <= -64) {
- sh = 0x100 - start + idx;
- /*
- * If < 0 but > -64, out of window. idx is before start
- * but not wrapped. Shift the indices around.
- */
- } else if (sh < 0) {
- /* Shift by how far start is ahead of idx */
- sh = start - idx;
- bitmap = bitmap << sh;
- /* Now idx is the new start so sh = 0 */
- start = idx;
- sh = 0;
- }
- /* Sequence number start + sh was sent in this batch */
- bitmap |= 1ULL << sh;
- IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
- start, (unsigned long long)bitmap);
- }
-
- /*
- * Store the bitmap and possibly the new start, if we wrapped
- * the buffer above
- */
- agg->bitmap = bitmap;
- agg->start_idx = start;
- IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- if (bitmap)
- agg->wait_for_ba = 1;
- }
- return 0;
-}
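The shift handling in the deleted iwlagn_tx_status_reply_tx() above keeps a 64-bit block-ack window positioned inside a 256-slot circular queue. A standalone sketch of just that window arithmetic, with a hypothetical helper name; it reproduces the same three wrap cases:

#include <stdint.h>
#include <stdio.h>

/* Frames live in a 256-slot circular queue; the block-ack bitmap covers a
 * 64-frame window starting at 'start'. This helper only mirrors the shift
 * logic of the deleted code. */
static void agg_bitmap_add(uint64_t *bitmap, int *start, int idx)
{
	int sh = idx - *start;

	if (sh >= 64) {
		/* idx wrapped past the window: make idx the new start */
		sh = 0x100 - idx + *start;
		*bitmap <<= sh;
		*start = idx;
		sh = 0;
	} else if (sh <= -64) {
		/* start/idx wrapped around the 256-slot buffer */
		sh = 0x100 - *start + idx;
	} else if (sh < 0) {
		/* idx precedes start without wrapping: shift and restart */
		sh = *start - idx;
		*bitmap <<= sh;
		*start = idx;
		sh = 0;
	}
	*bitmap |= 1ULL << sh;
}

int main(void)
{
	uint64_t bitmap = 0;
	int start = 255;

	agg_bitmap_add(&bitmap, &start, 255);	/* sets bit 0 */
	agg_bitmap_add(&bitmap, &start, 0);	/* wraps: sets bit 1 */
	printf("start=%d bitmap=0x%llx\n", start,
	       (unsigned long long)bitmap);
	return 0;
}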
-
-void iwl_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status)
-{
- if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
- IWL_ERR(priv, "Tx flush command to flush out all frames\n");
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->tx_flush);
- }
-}
-
-void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_tx_info *info;
- struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- struct ieee80211_hdr *hdr;
- struct iwl_tx_info *txb;
- u32 status = le16_to_cpu(tx_resp->status.status);
- int tid;
- int sta_id;
- int freed;
- unsigned long flags;
-
- if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
- "index %d is out of range [0-%d] %d %d\n", __func__,
- txq_id, index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- txb = &txq->txb[txq->q.read_ptr];
- info = IEEE80211_SKB_CB(txb->skb);
- memset(&info->status, 0, sizeof(info->status));
-
- tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
- IWLAGN_TX_RES_TID_POS;
- sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
- IWLAGN_TX_RES_RA_POS;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- hdr = (void *)txb->skb->data;
- if (!ieee80211_is_data_qos(hdr->frame_control))
- priv->last_seq_ctl = tx_resp->seq_ctl;
-
- if (txq->sched_retry) {
- const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg;
-
- agg = &priv->stations[sta_id].tid[tid].agg;
- /*
- * If the BT kill count is non-zero, we'll get this
- * notification again.
- */
- if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
- IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
- }
- iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
- IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
- "scd_ssn=%d idx=%d txq=%d swq=%d\n",
- scd_ssn , index, txq_id, txq->swq_id);
-
- freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_wake_queue(priv, txq);
- }
- } else {
- iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
- txq_id, false);
- freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- iwl_queue_space(&txq->q) > txq->q.low_mark &&
- status != TX_STATUS_FAIL_PASSIVE_NO_RX)
- iwl_wake_queue(priv, txq);
- }
-
- iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
-
- iwl_check_abort_status(priv, tx_resp->frame_count, status);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
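The deleted reply-Tx handler splits pkt->hdr.sequence into a queue id and a ring index via SEQ_TO_QUEUE()/SEQ_TO_INDEX(). The bit layout in the sketch below (queue in bits 8..12, index in bits 0..7) is an assumption for illustration; the authoritative macros live in iwl-commands.h.

#include <stdint.h>
#include <stdio.h>

/* Assumed bit layout for illustration only. */
#define EX_SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define EX_SEQ_TO_INDEX(s)	((s) & 0xff)

int main(void)
{
	uint16_t sequence = 0x0A2C;	/* example value */

	printf("txq_id=%u index=%u\n",
	       (unsigned)EX_SEQ_TO_QUEUE(sequence),
	       (unsigned)EX_SEQ_TO_INDEX(sequence));
	return 0;
}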
+#include "iwl-shared.h"
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
@@ -495,7 +51,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
u8 tx_ant_cfg_cmd;
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+ if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->shrd->status),
"TX Power requested while scanning!\n"))
return -EAGAIN;
@@ -525,7 +81,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC,
+ return iwl_trans_send_cmd_pdu(trans(priv), tx_ant_cfg_cmd, CMD_SYNC,
sizeof(tx_power_cmd), &tx_power_cmd);
}
@@ -538,11 +94,7 @@ void iwlagn_temperature(struct iwl_priv *priv)
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
{
- struct iwl_eeprom_calib_hdr {
- u8 version;
- u8 pa_type;
- u16 voltage;
- } *hdr;
+ struct iwl_eeprom_calib_hdr *hdr;
hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
EEPROM_CALIB_ALL);
@@ -609,6 +161,9 @@ struct iwl_mod_params iwlagn_mod_params = {
.bt_coex_active = true,
.no_sleep_autoadjust = true,
.power_level = IWL_POWER_INDEX_1,
+ .bt_ch_announce = true,
+ .wanted_ucode_alternative = 1,
+ .auto_agg = true,
/* the rest are 0 by default */
};
@@ -633,449 +188,6 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
return -1;
}
-static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- struct iwl_scan_channel *scan_ch)
-{
- const struct ieee80211_supported_band *sband;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added = 0;
- u16 channel = 0;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband) {
- IWL_ERR(priv, "invalid band\n");
- return added;
- }
-
- active_dwell = iwl_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- channel = iwl_get_single_channel_number(priv, band);
- if (channel) {
- scan_ch->channel = cpu_to_le16(channel);
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
- added++;
- } else
- IWL_ERR(priv, "no valid channel found\n");
- return added;
-}
-
-static int iwl_get_channels_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl_scan_channel *scan_ch)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
- u16 channel;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- channel = chan->hw_value;
- scan_ch->channel = cpu_to_le16(channel);
-
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
- if (n_probes)
- scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
- channel, le32_to_cpu(scan_ch->type),
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- "ACTIVE" : "PASSIVE",
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
- return added;
-}
-
-static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
-{
- struct sk_buff *skb = priv->offchan_tx_skb;
-
- if (skb->len < maxlen)
- maxlen = skb->len;
-
- memcpy(data, skb->data, maxlen);
-
- return maxlen;
-}
-
-int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = { sizeof(struct iwl_scan_cmd), },
- .flags = CMD_SYNC,
- };
- struct iwl_scan_cmd *scan;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u32 rate_flags = 0;
- u16 cmd_len;
- u16 rx_chain = 0;
- enum ieee80211_band band;
- u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
- u8 rate;
- bool is_active = false;
- int chan_mod;
- u8 active_chains;
- u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (vif)
- ctx = iwl_rxon_ctx_from_vif(vif);
-
- if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan_cmd) {
- IWL_DEBUG_SCAN(priv,
- "fail to allocate memory for scan\n");
- return -ENOMEM;
- }
- }
- scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
- iwl_is_any_associated(priv)) {
- u16 interval = 0;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- switch (priv->scan_type) {
- case IWL_SCAN_OFFCH_TX:
- WARN_ON(1);
- break;
- case IWL_SCAN_RADIO_RESET:
- interval = 0;
- break;
- case IWL_SCAN_NORMAL:
- interval = vif->bss_conf.beacon_int;
- break;
- }
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
-
- extra = (suspend_time / interval) << 22;
- scan_suspend_time = (extra |
- ((suspend_time % interval) * 1024));
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
- scan->suspend_time = 0;
- scan->max_out_time =
- cpu_to_le32(1024 * priv->offchan_tx_timeout);
- }
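The suspend_time packing a few lines above splits the value into a beacon-interval quotient (shifted into the upper bits) and a remainder expressed in 1024-usec units. A standalone sketch of just that arithmetic, with illustrative names; it reproduces the computation, not any firmware interpretation:

#include <stdint.h>
#include <stdio.h>

static uint32_t encode_scan_suspend_time(uint32_t suspend_time,
					 uint32_t beacon_interval)
{
	/* quotient into the top bits, remainder in 1024-usec units */
	uint32_t extra = (suspend_time / beacon_interval) << 22;

	return extra | ((suspend_time % beacon_interval) * 1024);
}

int main(void)
{
	/* suspend_time 100 with a 100 TU beacon interval:
	 * quotient 1, remainder 0 -> 1 << 22 */
	printf("0x%08x\n", encode_scan_suspend_time(100, 100));
	return 0;
}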
-
- switch (priv->scan_type) {
- case IWL_SCAN_RADIO_RESET:
- IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
- break;
- case IWL_SCAN_NORMAL:
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
- break;
- case IWL_SCAN_OFFCH_TX:
- IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
- break;
- }
-
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = ctx->bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- chan_mod = le32_to_cpu(
- priv->contexts[IWL_RXON_CTX_BSS].active.flags &
- RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
- rate = IWL_RATE_6M_PLCP;
- } else {
- rate = IWL_RATE_1M_PLCP;
- rate_flags = RATE_MCS_CCK_MSK;
- }
- /*
- * Internal scans are passive, so we can indiscriminately set
- * the BT ignore flag on 2.4 GHz since it applies to TX only.
- */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist)
- scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
- break;
- case IEEE80211_BAND_5GHZ:
- rate = IWL_RATE_6M_PLCP;
- break;
- default:
- IWL_WARN(priv, "Invalid scan band\n");
- return -EIO;
- }
-
- /*
- * If active scanning is requested but a certain channel is
- * marked passive, we can do active scanning if we detect
- * transmissions.
- *
- * There is an issue with some firmware versions that triggers
- * a sysassert on a "good CRC threshold" of zero (== disabled),
- * on a radar channel even though this means that we should NOT
- * send probes.
- *
- * The "good CRC threshold" is the number of frames that we
- * need to receive during our dwell time on a channel before
- * sending out probes -- setting this to a huge value will
- * mean we never reach it, but at the same time work around
- * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
- * here instead of IWL_GOOD_CRC_TH_DISABLED.
- *
- * This was fixed in later versions along with some other
- * scan changes, and the threshold behaves as a flag in those
- * versions.
- */
- if (priv->new_scan_threshold_behaviour)
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_DISABLED;
- else
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_NEVER;
-
- band = priv->scan_band;
-
- if (priv->cfg->scan_rx_antennas[band])
- rx_ant = priv->cfg->scan_rx_antennas[band];
-
- if (band == IEEE80211_BAND_2GHZ &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
- /* transmit 2.4 GHz probes only on first antenna */
- scan_tx_antennas = first_antenna(scan_tx_antennas);
- }
-
- priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
- scan_tx_antennas);
- rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
- scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
-
- /* In power save mode use one chain, otherwise use all chains */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* rx_ant has been set to all valid chains previously */
- active_chains = rx_ant &
- ((u8)(priv->chain_noise_data.active_chains));
- if (!active_chains)
- active_chains = rx_ant;
-
- IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
- priv->chain_noise_data.active_chains);
-
- rx_ant = first_antenna(active_chains);
- }
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
- priv->bt_full_concurrent) {
- /* operated as 1x1 in full concurrency mode */
- rx_ant = first_antenna(rx_ant);
- }
-
- /* MIMO is not used here, but value is required */
- rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- scan->rx_chain = cpu_to_le16(rx_chain);
- switch (priv->scan_type) {
- case IWL_SCAN_NORMAL:
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- vif->addr,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- break;
- case IWL_SCAN_RADIO_RESET:
- /* use bcast addr, will not be transmitted but must be valid */
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- iwl_bcast_addr, NULL, 0,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- break;
- case IWL_SCAN_OFFCH_TX:
- cmd_len = iwl_fill_offch_tx(priv, scan->data,
- IWL_MAX_SCAN_SIZE
- - sizeof(*scan)
- - sizeof(struct iwl_scan_channel));
- scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
- break;
- default:
- BUG();
- }
- scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
- scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
- RXON_FILTER_BCON_AWARE_MSK);
-
- switch (priv->scan_type) {
- case IWL_SCAN_RADIO_RESET:
- scan->channel_count =
- iwl_get_single_channel_for_scan(priv, vif, band,
- (void *)&scan->data[cmd_len]);
- break;
- case IWL_SCAN_NORMAL:
- scan->channel_count =
- iwl_get_channels_for_scan(priv, vif, band,
- is_active, n_probes,
- (void *)&scan->data[cmd_len]);
- break;
- case IWL_SCAN_OFFCH_TX: {
- struct iwl_scan_channel *scan_ch;
-
- scan->channel_count = 1;
-
- scan_ch = (void *)&scan->data[cmd_len];
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
- scan_ch->channel =
- cpu_to_le16(priv->offchan_tx_chan->hw_value);
- scan_ch->active_dwell =
- cpu_to_le16(priv->offchan_tx_timeout);
- scan_ch->passive_dwell = 0;
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (priv->offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
- }
- break;
- }
-
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- return -EIO;
- }
-
- cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl_scan_channel);
- cmd.data[0] = scan;
- cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- scan->len = cpu_to_le16(cmd.len[0]);
-
- /* set scan bit here for PAN params */
- set_bit(STATUS_SCAN_HW, &priv->status);
-
- ret = iwlagn_set_pan_params(priv);
- if (ret)
- return ret;
-
- ret = trans_send_cmd(&priv->trans, &cmd);
- if (ret) {
- clear_bit(STATUS_SCAN_HW, &priv->status);
- iwlagn_set_pan_params(priv);
- }
-
- return ret;
-}
-
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add)
{
@@ -1089,52 +201,6 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
vif->bss_conf.bssid);
}
-void iwl_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed)
-{
- lockdep_assert_held(&priv->sta_lock);
-
- if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
- priv->stations[sta_id].tid[tid].tfds_in_queue,
- freed);
- priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
- }
-}
-
-#define IWL_FLUSH_WAIT_MS 2000
-
-int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- int cnt;
- unsigned long now = jiffies;
- int ret = 0;
-
- /* waiting for all the tx frames to complete might take a while */
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- if (cnt == priv->cmd_queue)
- continue;
- txq = &priv->txq[cnt];
- q = &txq->q;
- while (q->read_ptr != q->write_ptr && !time_after(jiffies,
- now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
- msleep(1);
-
- if (q->read_ptr != q->write_ptr) {
- IWL_ERR(priv, "fail to flush all tx fifo queues\n");
- ret = -ETIMEDOUT;
- break;
- }
- }
- return ret;
-}
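The queue-drain wait deleted above is a bounded poll: sleep in 1 ms steps until the read and write pointers meet or IWL_FLUSH_WAIT_MS elapses. A userspace approximation of the same pattern; every name below is a stand-in, not driver code:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define FLUSH_WAIT_MS 2000

static bool queue_is_empty(void)
{
	static int remaining = 3;	/* pretend 3 frames drain over time */
	return remaining-- <= 0;
}

static int wait_queue_empty(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!queue_is_empty()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > FLUSH_WAIT_MS)
			return -1;	/* timed out, like -ETIMEDOUT */
		usleep(1000);		/* mirrors msleep(1) */
	}
	return 0;
}

int main(void)
{
	printf("wait_queue_empty() = %d\n", wait_queue_empty());
	return 0;
}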
-
-#define IWL_TX_QUEUE_MSK 0xfffff
-
/**
* iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
*
@@ -1160,7 +226,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
IWL_SCD_MGMT_MSK;
if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
- (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
+ (priv->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
@@ -1173,22 +239,22 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
flush_cmd.fifo_control);
flush_cmd.flush_control = cpu_to_le16(flush_control);
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
ieee80211_stop_queues(priv->hw);
if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
- iwlagn_wait_tx_queue_empty(priv);
+ iwl_trans_wait_tx_queue_empty(trans(priv));
done:
ieee80211_wake_queues(priv->hw);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
/*
@@ -1367,12 +433,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
if (priv->cfg->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
} else {
memcpy(&bt_cmd_6000.basic, &basic,
sizeof(basic));
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
}
if (ret)
@@ -1385,7 +451,7 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
struct iwl_rxon_context *ctx, *found_ctx = NULL;
bool found_ap = false;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* Check whether AP or GO mode is active. */
if (rssi_ena) {
@@ -1498,7 +564,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
break;
}
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
/*
* We can not send command to firmware while scanning. When the scan
@@ -1507,7 +573,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
* STATUS_SCANNING to avoid race when queue_work two times from
* different notifications, but quit and not perform any work at all.
*/
- if (test_bit(STATUS_SCAN_HW, &priv->status))
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status))
goto out;
iwl_update_chain_flags(priv);
@@ -1526,7 +592,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
*/
iwlagn_bt_coex_rssi_monitor(priv);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
/*
@@ -1633,12 +699,13 @@ static void iwlagn_set_kill_msk(struct iwl_priv *priv,
priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
/* schedule to send runtime bt_config */
- queue_work(priv->workqueue, &priv->bt_runtime_config);
+ queue_work(priv->shrd->workqueue, &priv->bt_runtime_config);
}
}
-void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
unsigned long flags;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -1647,7 +714,7 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
/* bt coex disabled */
- return;
+ return 0;
}
IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
@@ -1677,7 +744,7 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
}
priv->bt_status = coex->bt_status;
- queue_work(priv->workqueue,
+ queue_work(priv->shrd->workqueue,
&priv->bt_traffic_change_work);
}
}
@@ -1686,9 +753,10 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
/* FIXME: based on notification, adjust the prio_boost */
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
priv->bt_ci_compliance = coex->bt_ci_compliance;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ return 0;
}
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
@@ -1788,7 +856,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
bool is_single = is_single_rx_stream(priv);
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
+ bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status);
u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
u32 active_chains;
u16 rx_chain;
@@ -1800,7 +868,7 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (priv->chain_noise_data.active_chains)
active_chains = priv->chain_noise_data.active_chains;
else
- active_chains = priv->hw_params.valid_rx_ant;
+ active_chains = hw_params(priv).valid_rx_ant;
if (priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist &&
@@ -1865,136 +933,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
return ant;
}
-static const char *get_csr_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(CSR_HW_IF_CONFIG_REG);
- IWL_CMD(CSR_INT_COALESCING);
- IWL_CMD(CSR_INT);
- IWL_CMD(CSR_INT_MASK);
- IWL_CMD(CSR_FH_INT_STATUS);
- IWL_CMD(CSR_GPIO_IN);
- IWL_CMD(CSR_RESET);
- IWL_CMD(CSR_GP_CNTRL);
- IWL_CMD(CSR_HW_REV);
- IWL_CMD(CSR_EEPROM_REG);
- IWL_CMD(CSR_EEPROM_GP);
- IWL_CMD(CSR_OTP_GP_REG);
- IWL_CMD(CSR_GIO_REG);
- IWL_CMD(CSR_GP_UCODE_REG);
- IWL_CMD(CSR_GP_DRIVER_REG);
- IWL_CMD(CSR_UCODE_DRV_GP1);
- IWL_CMD(CSR_UCODE_DRV_GP2);
- IWL_CMD(CSR_LED_REG);
- IWL_CMD(CSR_DRAM_INT_TBL_REG);
- IWL_CMD(CSR_GIO_CHICKEN_BITS);
- IWL_CMD(CSR_ANA_PLL_CFG);
- IWL_CMD(CSR_HW_REV_WA_REG);
- IWL_CMD(CSR_DBG_HPET_MEM_REG);
- default:
- return "UNKNOWN";
- }
-}
-
-void iwl_dump_csr(struct iwl_priv *priv)
-{
- int i;
- static const u32 csr_tbl[] = {
- CSR_HW_IF_CONFIG_REG,
- CSR_INT_COALESCING,
- CSR_INT,
- CSR_INT_MASK,
- CSR_FH_INT_STATUS,
- CSR_GPIO_IN,
- CSR_RESET,
- CSR_GP_CNTRL,
- CSR_HW_REV,
- CSR_EEPROM_REG,
- CSR_EEPROM_GP,
- CSR_OTP_GP_REG,
- CSR_GIO_REG,
- CSR_GP_UCODE_REG,
- CSR_GP_DRIVER_REG,
- CSR_UCODE_DRV_GP1,
- CSR_UCODE_DRV_GP2,
- CSR_LED_REG,
- CSR_DRAM_INT_TBL_REG,
- CSR_GIO_CHICKEN_BITS,
- CSR_ANA_PLL_CFG,
- CSR_HW_REV_WA_REG,
- CSR_DBG_HPET_MEM_REG
- };
- IWL_ERR(priv, "CSR values:\n");
- IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
- "CSR_INT_PERIODIC_REG)\n");
- for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
- IWL_ERR(priv, " %25s: 0X%08x\n",
- get_csr_string(csr_tbl[i]),
- iwl_read32(priv, csr_tbl[i]));
- }
-}
-
-static const char *get_fh_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-}
-
-int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
-{
- int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- int pos = 0;
- size_t bufsz = 0;
-#endif
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (display) {
- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(priv, fh_tbl[i]));
- }
- return pos;
- }
-#endif
- IWL_ERR(priv, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- IWL_ERR(priv, " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(priv, fh_tbl[i]));
- }
- return 0;
-}
-
/* notification wait support */
void iwlagn_init_notification_wait(struct iwl_priv *priv,
struct iwl_notification_wait *wait_entry,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 3789ff4bf53..66118cea2af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -27,7 +27,6 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/netdevice.h>
@@ -37,7 +36,6 @@
#include <linux/workqueue.h>
#include "iwl-dev.h"
-#include "iwl-sta.h"
#include "iwl-core.h"
#include "iwl-agn.h"
@@ -298,10 +296,10 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
} else
- return MAX_TID_COUNT;
+ return IWL_MAX_TID_COUNT;
if (unlikely(tid >= TID_MAX_LOAD_COUNT))
- return MAX_TID_COUNT;
+ return IWL_MAX_TID_COUNT;
tl = &lq_data->load[tid];
@@ -314,7 +312,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
tl->queue_count = 1;
tl->head = 0;
tl->packet_count[0] = 1;
- return MAX_TID_COUNT;
+ return IWL_MAX_TID_COUNT;
}
time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
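The traffic-load bookkeeping in rs_tl_add_packet() above keys on the TID taken from the low nibble of the 802.11 QoS control field. A tiny illustration of that extraction (the helper name is made up):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: the TID is the low four bits of the first QoS
 * control byte. */
static uint8_t qos_ctl_to_tid(const uint8_t *qc)
{
	return qc[0] & 0xf;
}

int main(void)
{
	uint8_t qos_ctl[2] = { 0x06, 0x00 };	/* TID 6 in the low nibble */

	printf("tid=%u\n", qos_ctl_to_tid(qos_ctl));
	return 0;
}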
@@ -347,7 +345,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
{
struct iwl_station_priv *sta_priv =
container_of(lq_sta, struct iwl_station_priv, lq_sta);
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
@@ -421,7 +419,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
load = rs_tl_get_load(lq_data, tid);
- if (load > IWL_AGG_LOAD_THRESHOLD) {
+ if ((iwlagn_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -711,7 +709,7 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
static bool rs_use_green(struct ieee80211_sta *sta)
{
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
!(ctx->ht.non_gf_sta_present);
@@ -820,7 +818,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
if (num_of_ant(tbl->ant_type) > 1)
tbl->ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
tbl->is_ht40 = 0;
tbl->is_SGI = 0;
@@ -878,12 +876,12 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
* Is there a need to switch between
* full concurrency and 3-wire?
*/
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
full_concurrent = true;
else
full_concurrent = false;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
@@ -894,7 +892,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
- queue_work(priv->workqueue, &priv->bt_full_concurrency);
+ queue_work(priv->shrd->workqueue, &priv->bt_full_concurrency);
}
}
@@ -918,7 +916,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
struct iwl_scale_tbl_info tbl_type;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
@@ -1284,7 +1282,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
s32 rate;
s8 is_green = lq_sta->is_green;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@@ -1294,7 +1292,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
return -1;
/* Need both Tx chains/antennas to support MIMO */
- if (priv->hw_params.tx_chains_num < 2)
+ if (hw_params(priv).tx_chains_num < 2)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
@@ -1340,7 +1338,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
s32 rate;
s8 is_green = lq_sta->is_green;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@@ -1350,7 +1348,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
return -1;
/* Need both Tx chains/antennas to support MIMO */
- if (priv->hw_params.tx_chains_num < 3)
+ if (hw_params(priv).tx_chains_num < 3)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
@@ -1397,7 +1395,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
u8 is_green = lq_sta->is_green;
s32 rate;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@@ -1449,8 +1447,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
int ret = 0;
u8 update_search_tbl_counter = 0;
@@ -1460,14 +1458,16 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
tbl->action != IWL_LEGACY_SWITCH_SISO)
tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1490,7 +1490,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_SISO;
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
}
start_action = tbl->action;
@@ -1624,8 +1625,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
@@ -1635,14 +1636,16 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
@@ -1659,7 +1662,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent) {
- valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
+ valid_tx_ant =
+ first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
}
@@ -1795,8 +1799,8 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
@@ -1965,8 +1969,8 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
int ret;
u8 update_search_tbl_counter = 0;
@@ -2209,7 +2213,6 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
/*
* setup rate table in uCode
- * return rate_n_flags as used in the table
*/
static void rs_update_rate_tbl(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
@@ -2256,10 +2259,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
u8 done_search = 0;
u16 high_low;
s32 sr;
- u8 tid = MAX_TID_COUNT;
+ u8 tid = IWL_MAX_TID_COUNT;
struct iwl_tid_data *tid_data;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+ struct iwl_rxon_context *ctx = sta_priv->ctx;
IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
@@ -2269,14 +2272,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
- if (!sta || !lq_sta)
- return;
-
lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
tid = rs_tl_add_packet(lq_sta, hdr);
- if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
- tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
+ if ((tid != IWL_MAX_TID_COUNT) &&
+ (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF)
lq_sta->is_agg = 0;
else
@@ -2646,9 +2647,10 @@ lq_update:
iwl_ht_enabled(priv)) {
if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
- (tid != MAX_TID_COUNT)) {
+ (tid != IWL_MAX_TID_COUNT)) {
+ u8 sta_id = lq_sta->lq.sta_id;
tid_data =
- &priv->stations[lq_sta->lq.sta_id].tid[tid];
+ &priv->shrd->tid_data[sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF) {
IWL_DEBUG_RATE(priv,
"try to aggregate tid %d\n",
@@ -2663,8 +2665,7 @@ lq_update:
out:
tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
- i = index;
- lq_sta->last_txrate_idx = i;
+ lq_sta->last_txrate_idx = index;
}
/**
@@ -2700,11 +2701,11 @@ static void rs_initialize_lq(struct iwl_priv *priv,
return;
sta_priv = (void *)sta->drv_priv;
- ctx = sta_priv->common.ctx;
+ ctx = sta_priv->ctx;
i = lq_sta->last_txrate_idx;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = hw_params(priv).valid_tx_ant;
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
@@ -2887,15 +2888,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
/* These values will be overridden later */
lq_sta->lq.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~first_antenna(priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant &
+ ~first_antenna(hw_params(priv).valid_tx_ant);
if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
+ hw_params(priv).valid_tx_ant;
}
/* as default allow aggregation for all tids */
@@ -2941,7 +2942,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
}
/* How many times should we repeat the initial rate? */
@@ -2973,7 +2974,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv->bt_full_concurrent)
valid_tx_ant = ANT_A;
else
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = hw_params(priv).valid_tx_ant;
}
/* Fill rest of rate table */
@@ -3007,7 +3008,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
}
/* Indicate to uCode which entries might be MIMO.
@@ -3098,7 +3099,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u8 ant_sel_tx;
priv = lq_sta->drv;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = hw_params(priv).valid_tx_ant;
if (lq_sta->dbg_fixed_rate) {
ant_sel_tx =
((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3169,9 +3170,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
lq_sta->dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ (hw_params(priv).valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (hw_params(priv).valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (hw_params(priv).valid_tx_ant & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
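The antenna-mask arithmetic in the rate-scaling hunks above boils down to isolating the first valid antenna bit and deriving the dual-stream mask from the remainder. A minimal standalone sketch, assuming first_antenna() simply returns the lowest set ANT_* bit (the ANT_* constants and helper below are illustrative):

#include <stdio.h>

#define ANT_A 0x1
#define ANT_B 0x2
#define ANT_C 0x4

/* Assumption: first_antenna() isolates the lowest valid antenna bit. */
static unsigned lowest_ant(unsigned valid)
{
	return valid & -valid;	/* lowest set bit */
}

int main(void)
{
	unsigned valid_tx_ant = ANT_A | ANT_B;
	unsigned single_stream = lowest_ant(valid_tx_ant);
	unsigned dual_stream = valid_tx_ant & ~lowest_ant(valid_tx_ant);

	/* single=0x1 (ANT_A), dual=0x2 (ANT_B) */
	printf("single=0x%x dual=0x%x\n", single_stream, dual_stream);
	return 0;
}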
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index bdae82e7fa9..f4f6deb829a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -27,6 +27,10 @@
#ifndef __iwl_agn_rs_h__
#define __iwl_agn_rs_h__
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+
struct iwl_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index 8e314003b63..5af9e6258a1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -3,7 +3,7 @@
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
+ * as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -35,12 +35,93 @@
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
-#include "iwl-sta.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
+#include "iwl-shared.h"
+const char *get_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ IWL_CMD(REPLY_ALIVE);
+ IWL_CMD(REPLY_ERROR);
+ IWL_CMD(REPLY_ECHO);
+ IWL_CMD(REPLY_RXON);
+ IWL_CMD(REPLY_RXON_ASSOC);
+ IWL_CMD(REPLY_QOS_PARAM);
+ IWL_CMD(REPLY_RXON_TIMING);
+ IWL_CMD(REPLY_ADD_STA);
+ IWL_CMD(REPLY_REMOVE_STA);
+ IWL_CMD(REPLY_REMOVE_ALL_STA);
+ IWL_CMD(REPLY_TXFIFO_FLUSH);
+ IWL_CMD(REPLY_WEPKEY);
+ IWL_CMD(REPLY_TX);
+ IWL_CMD(REPLY_LEDS_CMD);
+ IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
+ IWL_CMD(COEX_PRIORITY_TABLE_CMD);
+ IWL_CMD(COEX_MEDIUM_NOTIFICATION);
+ IWL_CMD(COEX_EVENT_CMD);
+ IWL_CMD(REPLY_QUIET_CMD);
+ IWL_CMD(REPLY_CHANNEL_SWITCH);
+ IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
+ IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
+ IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
+ IWL_CMD(POWER_TABLE_CMD);
+ IWL_CMD(PM_SLEEP_NOTIFICATION);
+ IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
+ IWL_CMD(REPLY_SCAN_CMD);
+ IWL_CMD(REPLY_SCAN_ABORT_CMD);
+ IWL_CMD(SCAN_START_NOTIFICATION);
+ IWL_CMD(SCAN_RESULTS_NOTIFICATION);
+ IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
+ IWL_CMD(BEACON_NOTIFICATION);
+ IWL_CMD(REPLY_TX_BEACON);
+ IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
+ IWL_CMD(QUIET_NOTIFICATION);
+ IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
+ IWL_CMD(MEASURE_ABORT_NOTIFICATION);
+ IWL_CMD(REPLY_BT_CONFIG);
+ IWL_CMD(REPLY_STATISTICS_CMD);
+ IWL_CMD(STATISTICS_NOTIFICATION);
+ IWL_CMD(REPLY_CARD_STATE_CMD);
+ IWL_CMD(CARD_STATE_NOTIFICATION);
+ IWL_CMD(MISSED_BEACONS_NOTIFICATION);
+ IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
+ IWL_CMD(SENSITIVITY_CMD);
+ IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
+ IWL_CMD(REPLY_RX_PHY_CMD);
+ IWL_CMD(REPLY_RX_MPDU_CMD);
+ IWL_CMD(REPLY_RX);
+ IWL_CMD(REPLY_COMPRESSED_BA);
+ IWL_CMD(CALIBRATION_CFG_CMD);
+ IWL_CMD(CALIBRATION_RES_NOTIFICATION);
+ IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
+ IWL_CMD(REPLY_TX_POWER_DBM_CMD);
+ IWL_CMD(TEMPERATURE_NOTIFICATION);
+ IWL_CMD(TX_ANT_CONFIGURATION_CMD);
+ IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
+ IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
+ IWL_CMD(REPLY_BT_COEX_PROT_ENV);
+ IWL_CMD(REPLY_WIPAN_PARAMS);
+ IWL_CMD(REPLY_WIPAN_RXON);
+ IWL_CMD(REPLY_WIPAN_RXON_TIMING);
+ IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
+ IWL_CMD(REPLY_WIPAN_QOS_PARAM);
+ IWL_CMD(REPLY_WIPAN_WEPKEY);
+ IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
+ IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
+ IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
+ IWL_CMD(REPLY_WOWLAN_PATTERNS);
+ IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
+ IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
+ IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
+ IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
+ IWL_CMD(REPLY_WOWLAN_GET_STATUS);
+ default:
+ return "UNKNOWN";
+
+ }
+}
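get_cmd_string() above leans on the IWL_CMD() case/stringify macro from the driver headers. A minimal sketch of that pattern, with illustrative command IDs standing in for the real iwl-commands.h values:

#include <stdio.h>

/* Illustrative command IDs; the real values come from iwl-commands.h. */
enum { REPLY_ALIVE = 0x01, REPLY_ERROR = 0x02 };

/* Sketch of the case/stringify pattern behind IWL_CMD(). */
#define IWL_CMD(x) case x: return #x

static const char *cmd_to_string(unsigned char cmd)
{
	switch (cmd) {
	IWL_CMD(REPLY_ALIVE);
	IWL_CMD(REPLY_ERROR);
	default:
		return "UNKNOWN";
	}
}

int main(void)
{
	printf("%s\n", cmd_to_string(REPLY_ERROR));	/* prints REPLY_ERROR */
	return 0;
}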
/******************************************************************************
*
@@ -48,8 +129,9 @@
*
******************************************************************************/
-static void iwl_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_reply_error(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -60,21 +142,23 @@ static void iwl_rx_reply_error(struct iwl_priv *priv,
pkt->u.err_resp.cmd_id,
le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
le32_to_cpu(pkt->u.err_resp.error_info));
+ return 0;
}
-static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
/*
* MULTI-FIXME
- * See iwl_mac_channel_switch.
+ * See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
- if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- return;
+ if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+ return 0;
if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
rxon->channel = csa->channel;
@@ -87,11 +171,13 @@ static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, false);
}
+ return 0;
}
-static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
@@ -99,15 +185,17 @@ static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
if (!report->state) {
IWL_DEBUG_11H(priv,
"Spectrum Measure Notification: Start\n");
- return;
+ return 0;
}
memcpy(&priv->measure_report, report, sizeof(*report));
priv->measurement_status |= MEASUREMENT_READY;
+ return 0;
}
-static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -115,21 +203,26 @@ static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
+ return 0;
}
-static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ u32 __maybe_unused len =
+ le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for %s:\n", len,
get_cmd_string(pkt->hdr.cmd));
iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
+ return 0;
}
-static void iwl_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
@@ -148,8 +241,7 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->beacon_update);
+ return 0;
}
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
@@ -164,7 +256,7 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
* the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
* operation state.
*/
-static bool iwl_good_ack_health(struct iwl_priv *priv,
+static bool iwlagn_good_ack_health(struct iwl_priv *priv,
struct statistics_tx *cur)
{
int actual_delta, expected_delta, ba_timeout_delta;
@@ -189,8 +281,9 @@ static bool iwl_good_ack_health(struct iwl_priv *priv,
if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
ba_timeout_delta > BA_TIMEOUT_CNT) {
- IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
- actual_delta, expected_delta, ba_timeout_delta);
+ IWL_DEBUG_RADIO(priv,
+ "deltas: actual %d expected %d ba_timeout %d\n",
+ actual_delta, expected_delta, ba_timeout_delta);
#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
@@ -218,7 +311,7 @@ static bool iwl_good_ack_health(struct iwl_priv *priv,
* When the plcp error is exceeding the thresholds, reset the radio
* to improve the throughput.
*/
-static bool iwl_good_plcp_health(struct iwl_priv *priv,
+static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
struct statistics_rx_phy *cur_ofdm,
struct statistics_rx_ht_phy *cur_ofdm_ht,
unsigned int msecs)
@@ -250,15 +343,15 @@ static bool iwl_good_plcp_health(struct iwl_priv *priv,
return true;
}
-static void iwl_recover_from_statistics(struct iwl_priv *priv,
- struct statistics_rx_phy *cur_ofdm,
- struct statistics_rx_ht_phy *cur_ofdm_ht,
- struct statistics_tx *tx,
- unsigned long stamp)
+static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
+ struct statistics_rx_phy *cur_ofdm,
+ struct statistics_rx_ht_phy *cur_ofdm_ht,
+ struct statistics_tx *tx,
+ unsigned long stamp)
{
unsigned int msecs;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
@@ -271,21 +364,21 @@ static void iwl_recover_from_statistics(struct iwl_priv *priv,
if (msecs < 99)
return;
- if (iwlagn_mod_params.ack_check && !iwl_good_ack_health(priv, tx)) {
+ if (iwlagn_mod_params.ack_check && !iwlagn_good_ack_health(priv, tx)) {
IWL_ERR(priv, "low ack count detected, restart firmware\n");
if (!iwl_force_reset(priv, IWL_FW_RESET, false))
return;
}
if (iwlagn_mod_params.plcp_check &&
- !iwl_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
+ !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
iwl_force_reset(priv, IWL_RF_RESET, false);
}
/* Calculate noise level, based on measurements during network silence just
* before arriving beacon. This measurement can be done only if we know
* exactly when to expect beacons, therefore only when we're associated. */
-static void iwl_rx_calc_noise(struct iwl_priv *priv)
+static void iwlagn_rx_calc_noise(struct iwl_priv *priv)
{
struct statistics_rx_non_phy *rx_info;
int num_active_rx = 0;
@@ -351,7 +444,7 @@ static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
}
static void
-iwl_accumulative_statistics(struct iwl_priv *priv,
+iwlagn_accumulative_statistics(struct iwl_priv *priv,
struct statistics_general_common *common,
struct statistics_rx_non_phy *rx_non_phy,
struct statistics_rx_phy *rx_ofdm,
@@ -380,7 +473,7 @@ iwl_accumulative_statistics(struct iwl_priv *priv,
}
#else
static inline void
-iwl_accumulative_statistics(struct iwl_priv *priv,
+iwlagn_accumulative_statistics(struct iwl_priv *priv,
struct statistics_general_common *common,
struct statistics_rx_non_phy *rx_non_phy,
struct statistics_rx_phy *rx_ofdm,
@@ -392,8 +485,9 @@ iwl_accumulative_statistics(struct iwl_priv *priv,
}
#endif
-static void iwl_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
unsigned long stamp = jiffies;
const int reg_recalib_period = 60;
@@ -447,17 +541,17 @@ static void iwl_rx_statistics(struct iwl_priv *priv,
WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
len, sizeof(struct iwl_bt_notif_statistics),
sizeof(struct iwl_notif_statistics));
- return;
+ return 0;
}
change = common->temperature != priv->statistics.common.temperature ||
(*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
(priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);
- iwl_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
+ iwlagn_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
rx_ofdm_ht, rx_cck, tx, bt_activity);
- iwl_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);
+ iwlagn_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);
priv->statistics.flag = *flag;
memcpy(&priv->statistics.common, common, sizeof(*common));
@@ -474,7 +568,7 @@ static void iwl_rx_statistics(struct iwl_priv *priv,
priv->rx_statistics_jiffies = stamp;
- set_bit(STATUS_STATISTICS, &priv->status);
+ set_bit(STATUS_STATISTICS, &priv->shrd->status);
/* Reschedule the statistics timer to occur in
* reg_recalib_period seconds to ensure we get a
@@ -483,17 +577,19 @@ static void iwl_rx_statistics(struct iwl_priv *priv,
mod_timer(&priv->statistics_periodic, jiffies +
msecs_to_jiffies(reg_recalib_period * 1000));
- if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
+ if (unlikely(!test_bit(STATUS_SCANNING, &priv->shrd->status)) &&
(pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
- iwl_rx_calc_noise(priv);
- queue_work(priv->workqueue, &priv->run_time_calib_work);
+ iwlagn_rx_calc_noise(priv);
+ queue_work(priv->shrd->workqueue, &priv->run_time_calib_work);
}
if (priv->cfg->lib->temperature && change)
priv->cfg->lib->temperature(priv);
+ return 0;
}
-static void iwl_rx_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -508,17 +604,19 @@ static void iwl_rx_reply_statistics(struct iwl_priv *priv,
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
- iwl_rx_statistics(priv, rxb);
+ iwlagn_rx_statistics(priv, rxb, cmd);
+ return 0;
}
/* Handle notification from uCode that card's power state is changing
* due to software, hardware, or critical temperature RFKILL */
-static void iwl_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
+ unsigned long status = priv->shrd->status;
IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
@@ -529,16 +627,16 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
CT_CARD_DISABLED)) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- iwl_write_direct32(priv, HBUS_TARG_MBX_C,
+ iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
if (!(flags & RXON_CARD_DISABLED)) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- iwl_write_direct32(priv, HBUS_TARG_MBX_C,
+ iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
if (flags & CT_CARD_DISABLED)
@@ -548,24 +646,26 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
iwl_tt_exit_ct_kill(priv);
if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
+ set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
if (!(flags & RXON_CARD_DISABLED))
iwl_scan_cancel(priv);
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
+ test_bit(STATUS_RF_KILL_HW, &priv->shrd->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
+ test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
else
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->shrd->wait_command_queue);
+ return 0;
}
-static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -580,27 +680,30 @@ static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
le32_to_cpu(missed_beacon->total_missed_becons),
le32_to_cpu(missed_beacon->num_recvd_beacons),
le32_to_cpu(missed_beacon->num_expected_beacons));
- if (!test_bit(STATUS_SCANNING, &priv->status))
+ if (!test_bit(STATUS_SCANNING, &priv->shrd->status))
iwl_init_sensitivity(priv);
}
+ return 0;
}
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
priv->last_phy_res_valid = true;
memcpy(&priv->last_phy_res, pkt->u.raw,
sizeof(struct iwl_rx_phy_res));
+ return 0;
}
/*
* returns non-zero if packet should be dropped
*/
-static int iwl_set_decrypted_flag(struct iwl_priv *priv,
+static int iwlagn_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
struct ieee80211_rx_status *stats)
@@ -649,7 +752,7 @@ static int iwl_set_decrypted_flag(struct iwl_priv *priv,
return 0;
}
-static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
+static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u16 len,
u32 ampdu_status,
@@ -669,7 +772,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
/* In case of HW accelerated crypto and bad decryption, drop */
if (!iwlagn_mod_params.sw_crypto &&
- iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+ iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
return;
skb = dev_alloc_skb(128);
@@ -697,7 +800,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
ctx->active.bssid_addr))
continue;
ctx->last_tx_rejected = false;
- iwl_wake_any_queue(priv, ctx);
+ iwl_trans_wake_any_queue(trans(priv), ctx->ctxid);
}
}
@@ -707,7 +810,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
rxb->page = NULL;
}
-static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
u32 decrypt_out = 0;
@@ -809,8 +912,9 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
/* Called for REPLY_RX (legacy ABG frames), or
* REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-static void iwl_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status;
@@ -843,28 +947,28 @@ static void iwl_rx_reply_rx(struct iwl_priv *priv,
} else {
if (!priv->last_phy_res_valid) {
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return;
+ return 0;
}
phy_res = &priv->last_phy_res;
amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
- ampdu_status = iwl_translate_rx_status(priv,
+ ampdu_status = iwlagn_translate_rx_status(priv,
le32_to_cpu(rx_pkt_status));
}
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
phy_res->cfg_phy_cnt);
- return;
+ return 0;
}
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
le32_to_cpu(rx_pkt_status));
- return;
+ return 0;
}
/* This will be used in several places later */
@@ -923,8 +1027,9 @@ static void iwl_rx_reply_rx(struct iwl_priv *priv,
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
- iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+ iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
+ return 0;
}
/**
@@ -935,36 +1040,42 @@ static void iwl_rx_reply_rx(struct iwl_priv *priv,
*/
void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
- void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
+ int (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
handlers = priv->rx_handlers;
- handlers[REPLY_ERROR] = iwl_rx_reply_error;
- handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
- handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
- handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
- handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
- handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
+ handlers[REPLY_ERROR] = iwlagn_rx_reply_error;
+ handlers[CHANNEL_SWITCH_NOTIFICATION] = iwlagn_rx_csa;
+ handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwlagn_rx_spectrum_measure_notif;
+ handlers[PM_SLEEP_NOTIFICATION] = iwlagn_rx_pm_sleep_notif;
+ handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
+ iwlagn_rx_pm_debug_statistics_notif;
+ handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
+ handlers[REPLY_ADD_STA] = iwl_add_sta_callback;
/*
* The same handler is used for both the REPLY to a discrete
* statistics request from the host as well as for the periodic
* statistics notifications (after received beacons) from the uCode.
*/
- handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
- handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
+ handlers[REPLY_STATISTICS_CMD] = iwlagn_rx_reply_statistics;
+ handlers[STATISTICS_NOTIFICATION] = iwlagn_rx_statistics;
iwl_setup_rx_scan_handlers(priv);
- handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
- handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
+ handlers[CARD_STATE_NOTIFICATION] = iwlagn_rx_card_state_notif;
+ handlers[MISSED_BEACONS_NOTIFICATION] =
+ iwlagn_rx_missed_beacon_notif;
/* Rx handlers */
- handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
- handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
+ handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
+ handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
/* block ack */
- handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
+ handlers[REPLY_COMPRESSED_BA] =
+ iwlagn_rx_reply_compressed_ba;
/* init calibration handlers */
priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
@@ -982,9 +1093,11 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
}
-void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int err = 0;
/*
* Do the notification wait before RX handlers so
@@ -1018,12 +1131,13 @@ void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
* handle those that need handling via function in
* rx_handlers table. See iwl_setup_rx_handlers() */
if (priv->rx_handlers[pkt->hdr.cmd]) {
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
+ priv->rx_handlers_stats[pkt->hdr.cmd]++;
+ err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
} else {
/* No handling needed */
IWL_DEBUG_RX(priv,
"No handler needed for %s, 0x%02x\n",
get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
}
+ return err;
}
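iwl_rx_dispatch() above is a straight function-pointer table indexed by the command byte, with a per-command counter. A self-contained sketch of the same pattern (handler names and the table size are illustrative):

#include <stdio.h>

#define MAX_CMD 256

struct pkt { unsigned char cmd; };

/* Handlers return 0 on success, mirroring the int return type
 * this patch introduces for the rx handlers. */
typedef int (*rx_handler_t)(struct pkt *pkt);

static rx_handler_t handlers[MAX_CMD];
static unsigned int handler_stats[MAX_CMD];

static int handle_error(struct pkt *pkt)
{
	printf("error notification, cmd 0x%02x\n", pkt->cmd);
	return 0;
}

static int dispatch(struct pkt *pkt)
{
	if (handlers[pkt->cmd]) {
		handler_stats[pkt->cmd]++;
		return handlers[pkt->cmd](pkt);
	}
	/* no handling needed */
	return 0;
}

int main(void)
{
	struct pkt p = { .cmd = 0x02 };

	handlers[0x02] = handle_error;	/* e.g. REPLY_ERROR */
	return dispatch(&p);
}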
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index d42ef1763a7..58a381c01c8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -26,11 +26,10 @@
#include "iwl-dev.h"
#include "iwl-agn.h"
-#include "iwl-sta.h"
#include "iwl-core.h"
#include "iwl-agn-calib.h"
-#include "iwl-helpers.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
static int iwlagn_disable_bss(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
@@ -40,7 +39,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -66,7 +65,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -92,7 +91,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
sizeof(*send), send);
send->filter_flags = old_filter;
@@ -121,7 +120,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
- ret = trans_send_cmd_pdu(&priv->trans, ctx->qos_cmd, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->qos_cmd, CMD_SYNC,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
@@ -131,7 +130,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
static int iwlagn_update_beacon(struct iwl_priv *priv,
struct ieee80211_vif *vif)
{
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
@@ -180,7 +179,7 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
ctx->staging.ofdm_ht_triple_stream_basic_rates;
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_assoc_cmd,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_assoc_cmd,
CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
return ret;
}
@@ -266,7 +265,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
* Associated RXON doesn't clear the station table in uCode,
* so we don't need to restore stations etc. after this.
*/
- ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
sizeof(struct iwl_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -295,8 +294,8 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
return ret;
}
- if ((ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) &&
- priv->cfg->ht_params->smps_mode)
+ if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
+ priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
ieee80211_request_smps(ctx->vif,
priv->cfg->ht_params->smps_mode);
@@ -310,12 +309,12 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
int slot0 = 300, slot1 = 0;
int ret;
- if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
+ if (priv->shrd->valid_contexts == BIT(IWL_RXON_CTX_BSS))
return 0;
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
@@ -337,10 +336,10 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].type = 0; /* BSS */
cmd.slots[1].type = 1; /* PAN */
- if (priv->hw_roc_channel) {
+ if (priv->hw_roc_setup) {
/* both contexts must be used for this to happen */
- slot1 = priv->hw_roc_duration;
- slot0 = IWL_MIN_SLOT_TIME;
+ slot1 = IWL_MIN_SLOT_TIME;
+ slot0 = 3000;
} else if (ctx_bss->vif && ctx_pan->vif) {
int bcnint = ctx_pan->beacon_int;
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
@@ -362,14 +361,14 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
slot0 = bcnint / 2;
slot1 = bcnint - slot0;
- if (test_bit(STATUS_SCAN_HW, &priv->status) ||
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
(!ctx_bss->vif->bss_conf.idle &&
!ctx_bss->vif->bss_conf.assoc)) {
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
} else if (!ctx_pan->vif->bss_conf.idle &&
!ctx_pan->vif->bss_conf.assoc) {
- slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
+ slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
slot0 = IWL_MIN_SLOT_TIME;
}
} else if (ctx_pan->vif) {
@@ -378,7 +377,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
ctx_pan->beacon_int;
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
}
@@ -387,7 +386,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].width = cpu_to_le16(slot0);
cmd.slots[1].width = cpu_to_le16(slot1);
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_WIPAN_PARAMS, CMD_SYNC,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WIPAN_PARAMS, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
@@ -420,12 +419,12 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return -EINVAL;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EBUSY;
/* This function hardcodes a bunch of dual-mode assumptions */
@@ -434,26 +433,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (!ctx->is_active)
return 0;
+ /* override BSSID if necessary due to preauth */
+ if (ctx->preauth_bssid)
+ memcpy(ctx->staging.bssid_addr, ctx->bssid, ETH_ALEN);
+
/* always get timestamp with Rx frame */
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
- if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->hw_roc_channel) {
- struct ieee80211_channel *chan = priv->hw_roc_channel;
-
- iwl_set_rxon_channel(priv, chan, ctx);
- iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
- ctx->staging.filter_flags |=
- RXON_FILTER_ASSOC_MSK |
- RXON_FILTER_PROMISC_MSK |
- RXON_FILTER_CTL2HOST_MSK;
- ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
- new_assoc = true;
-
- if (memcmp(&ctx->staging, &ctx->active,
- sizeof(ctx->staging)) == 0)
- return 0;
- }
-
/*
* force CTS-to-self frames protection if RTS-CTS is not preferred
* one aggregation protection method
@@ -468,7 +454,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
else
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- iwl_print_rx_config_cmd(priv, ctx);
+ iwl_print_rx_config_cmd(priv, ctx->ctxid);
ret = iwl_check_rxon_cmd(priv, ctx);
if (ret) {
IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
@@ -479,7 +465,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
- if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+ if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status) &&
(priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
le16_to_cpu(priv->switch_channel));
@@ -551,16 +537,16 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
const struct iwl_channel_info *ch_info;
int ret = 0;
- IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
+ IWL_DEBUG_MAC80211(priv, "enter: changed %#x", changed);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
+ if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
goto out;
}
- if (!iwl_is_ready(priv)) {
+ if (!iwl_is_ready(priv->shrd)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
}
@@ -592,7 +578,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
goto out;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
for_each_context(priv, ctx) {
/* Configure HT40 channels */
@@ -636,7 +622,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
ctx->vif);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
iwl_update_bcast_stations(priv);
@@ -668,7 +654,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
iwlagn_commit_rxon(priv, ctx);
}
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
return ret;
}
@@ -683,7 +671,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
struct ieee80211_sta_ht_cap *ht_cap;
bool need_multiple;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -787,7 +775,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr,
priv->phy_calib_chain_noise_reset_cmd);
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_PHY_CALIBRATION_CMD,
CMD_SYNC, sizeof(cmd), &cmd);
if (ret)
@@ -808,17 +796,17 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
int ret;
bool force = false;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (unlikely(!iwl_is_ready(priv))) {
+ if (unlikely(!iwl_is_ready(priv->shrd))) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return;
}
if (unlikely(!ctx->vif)) {
IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return;
}
@@ -851,7 +839,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
*/
if (ctx->last_tx_rejected) {
ctx->last_tx_rejected = false;
- iwl_wake_any_queue(priv, ctx);
+ iwl_trans_wake_any_queue(trans(priv),
+ ctx->ctxid);
}
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -912,6 +901,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (!priv->disable_chain_noise_cal)
iwlagn_chain_noise_reset(priv);
priv->start_calib = 1;
+ WARN_ON(ctx->preauth_bssid);
}
if (changes & BSS_CHANGED_IBSS) {
@@ -929,7 +919,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
IWL_ERR(priv, "Error sending IBSS beacon\n");
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
void iwlagn_post_scan(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 37e624095e4..ed628362393 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -31,25 +31,833 @@
#include "iwl-dev.h"
#include "iwl-core.h"
-#include "iwl-sta.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
-static struct iwl_link_quality_cmd *
-iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
+/* priv->shrd->sta_lock must be held */
+static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+{
+
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
+ IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u "
+ "addr %pM\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA id %u addr %pM already present in uCode "
+ "(according to driver)\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ } else {
+ priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ }
+}
+
+static int iwl_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *addsta,
+ struct iwl_rx_packet *pkt)
+{
+ u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
+ int ret = -EIO;
+
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
+ pkt->hdr.flags);
+ return ret;
+ }
+
+ IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+ sta_id);
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ switch (pkt->u.add_sta.status) {
+ case ADD_STA_SUCCESS_MSK:
+ IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
+ iwl_sta_ucode_activate(priv, sta_id);
+ ret = 0;
+ break;
+ case ADD_STA_NO_ROOM_IN_TABLE:
+ IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+ sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IWL_ERR(priv, "Adding station %d failed, no block ack "
+ "resource.\n", sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
+ sta_id);
+ break;
+ default:
+ IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+ pkt->u.add_sta.status);
+ break;
+ }
+
+ IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ addsta->sta.addr);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ return ret;
+}
+
+int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_addsta_cmd *addsta =
+ (struct iwl_addsta_cmd *) cmd->payload;
+
+ return iwl_process_add_sta_resp(priv, addsta, pkt);
+}
+
+static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+{
+ u16 size = (u16)sizeof(struct iwl_addsta_cmd);
+ struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
+ memcpy(addsta, cmd, size);
+ /* reserved in 5000 */
+ addsta->rate_n_flags = cpu_to_le16(0);
+ return size;
+}
+
+int iwl_send_add_sta(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *sta, u8 flags)
+{
+ int ret = 0;
+ u8 data[sizeof(*sta)];
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_ADD_STA,
+ .flags = flags,
+ .data = { data, },
+ };
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+ sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
+
+ if (!(flags & CMD_ASYNC)) {
+ cmd.flags |= CMD_WANT_SKB;
+ might_sleep();
+ }
+
+ cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
+
+ if (ret || (flags & CMD_ASYNC))
+ return ret;
+ /* else the command was successfully sent in SYNC mode, need to free
+ * the reply page */
+
+ iwl_free_pages(priv->shrd, cmd.reply_page);
+
+ if (cmd.handler_status)
+ IWL_ERR(priv, "%s - error in the CMD response %d", __func__,
+ cmd.handler_status);
+
+ return cmd.handler_status;
+}
+
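iwl_send_add_sta() above follows the driver's usual sync-vs-async host-command convention: a synchronous caller asks for the reply (CMD_WANT_SKB), inspects the handler status and frees the reply page, while an asynchronous caller returns immediately. A minimal sketch of that convention with a faked transport (everything below is illustrative, not the driver's API):

#include <stdio.h>
#include <stdlib.h>

#define CMD_ASYNC	0x1
#define CMD_WANT_SKB	0x2

struct host_cmd {
	int id;
	int flags;
	int handler_status;
	void *reply_page;
};

/* Stand-in for the transport layer: pretend the firmware answered. */
static int fake_send_cmd(struct host_cmd *cmd)
{
	if (cmd->flags & CMD_WANT_SKB)
		cmd->reply_page = malloc(16);
	cmd->handler_status = 0;
	return 0;
}

static int send_add_sta_sketch(int flags)
{
	struct host_cmd cmd = { .id = 0x18 /* e.g. REPLY_ADD_STA */, .flags = flags };
	int ret;

	if (!(flags & CMD_ASYNC))
		cmd.flags |= CMD_WANT_SKB;	/* sync: we want the reply */

	ret = fake_send_cmd(&cmd);
	if (ret || (flags & CMD_ASYNC))
		return ret;	/* async callers never touch the reply */

	free(cmd.reply_page);	/* sync callers must release the reply page */
	return cmd.handler_status;
}

int main(void)
{
	printf("sync send -> %d\n", send_add_sta_sketch(0));
	return 0;
}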
+static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
+ struct ieee80211_sta *sta,
+ struct iwl_rxon_context *ctx)
+{
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ __le32 sta_flags;
+ u8 mimo_ps_mode;
+
+ if (!sta || !sta_ht_inf->ht_supported)
+ goto done;
+
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+ IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+ "static" :
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+ "dynamic" : "disabled");
+
+ sta_flags = priv->stations[index].sta.station_flags;
+
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ break;
+ default:
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ break;
+ }
+
+ sta_flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+ sta_flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+ if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ sta_flags |= STA_FLG_HT40_EN_MSK;
+ else
+ sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+ priv->stations[index].sta.station_flags = sta_flags;
+ done:
+ return;
+}
+
+/**
+ * iwl_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+ struct iwl_station_entry *station;
+ int i;
+ u8 sta_id = IWL_INVALID_STATION;
+
+ if (is_ap)
+ sta_id = ctx->ap_sta_id;
+ else if (is_broadcast_ether_addr(addr))
+ sta_id = ctx->bcast_sta_id;
+ else
+ for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) {
+ if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+ addr)) {
+ sta_id = i;
+ break;
+ }
+
+ if (!priv->stations[i].used &&
+ sta_id == IWL_INVALID_STATION)
+ sta_id = i;
+ }
+
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IWL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being "
+ "added.\n", sta_id);
+ return sta_id;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
+ !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
+ "adding again.\n", sta_id, addr);
+ return sta_id;
+ }
+
+ station = &priv->stations[sta_id];
+ station->used = IWL_STA_DRIVER_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
+ sta_id, addr);
+ priv->num_stations++;
+
+ /* Set up the REPLY_ADD_STA command to send to device */
+ memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
+ memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+ station->sta.mode = 0;
+ station->sta.sta.sta_id = sta_id;
+ station->sta.station_flags = ctx->station_flags;
+ station->ctxid = ctx->ctxid;
+
+ if (sta) {
+ struct iwl_station_priv *sta_priv;
+
+ sta_priv = (void *)sta->drv_priv;
+ sta_priv->ctx = ctx;
+ }
+
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL sta, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ iwl_set_ht_add_station(priv, sta_id, sta, ctx);
+
+ return sta_id;
+
+}
+
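iwl_prep_station() above picks a station slot by first looking for an entry that already holds the MAC address and otherwise remembering the first free index. A standalone sketch of that lookup (table size, types and names are illustrative):

#include <stdio.h>
#include <string.h>

#define NUM_SLOTS	16
#define INVALID_SLOT	0xff

struct sta_slot {
	int used;
	unsigned char addr[6];
};

/* Sketch of the lookup: reuse the slot that already holds this MAC
 * address, otherwise take the first free one. */
static unsigned find_slot(const struct sta_slot *tbl, const unsigned char *addr)
{
	unsigned slot = INVALID_SLOT;
	int i;

	for (i = 0; i < NUM_SLOTS; i++) {
		if (tbl[i].used && !memcmp(tbl[i].addr, addr, 6))
			return i;		/* already known */
		if (!tbl[i].used && slot == INVALID_SLOT)
			slot = i;		/* remember first free slot */
	}
	return slot;
}

int main(void)
{
	struct sta_slot tbl[NUM_SLOTS] = { { .used = 1, .addr = {1, 2, 3, 4, 5, 6} } };
	const unsigned char mac[6] = {1, 2, 3, 4, 5, 6};

	printf("slot %u\n", find_slot(tbl, mac));	/* 0: reuse existing */
	return 0;
}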
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_add_station_common -
+ */
+int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r)
+{
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+ struct iwl_addsta_cmd sta_cmd;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+ addr);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being "
+ "added.\n", sta_id);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
+ "adding again.\n", sta_id, addr);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_addsta_cmd));
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+
+ /* Add station to device's station table */
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[sta_id].sta.sta.addr);
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+
+/**
+ * iwl_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * priv->shrd->sta_lock must be held
+ */
+static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
+{
+ /* Ucode must be active and the driver must not be active */
+ if ((priv->stations[sta_id].used &
+ (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
+ IWL_STA_UCODE_ACTIVE)
+ IWL_ERR(priv, "removed non active STA %u\n", sta_id);
+
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+
+ memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+ IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
+}
+
+static int iwl_send_remove_station(struct iwl_priv *priv,
+ const u8 *addr, int sta_id,
+ bool temporary)
+{
+ struct iwl_rx_packet *pkt;
+ int ret;
+
+ unsigned long flags_spin;
+ struct iwl_rem_sta_cmd rm_sta_cmd;
+
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_REMOVE_STA,
+ .len = { sizeof(struct iwl_rem_sta_cmd), },
+ .flags = CMD_SYNC,
+ .data = { &rm_sta_cmd, },
+ };
+
+ memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+ rm_sta_cmd.num_sta = 1;
+ memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
+
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
+
+ if (ret)
+ return ret;
+
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
+ pkt->hdr.flags);
+ ret = -EIO;
+ }
+
+ if (!ret) {
+ switch (pkt->u.rem_sta.status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_irqsave(&priv->shrd->sta_lock,
+ flags_spin);
+ iwl_sta_ucode_deactivate(priv, sta_id);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
+ flags_spin);
+ }
+ IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+ break;
+ }
+ }
+ iwl_free_pages(priv->shrd, cmd.reply_page);
+
+ return ret;
+}
+
+/**
+ * iwl_remove_station - Remove driver's knowledge of station.
+ */
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
+{
+ unsigned long flags;
+
+ if (!iwl_is_ready(priv->shrd)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
+ sta_id, addr);
+
+ if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
+ addr);
+ goto out_err;
+ }
+
+ if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
+ addr);
+ goto out_err;
+ }
+
+ if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+ kfree(priv->stations[sta_id].lq);
+ priv->stations[sta_id].lq = NULL;
+ }
+
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+ priv->num_stations--;
+
+ if (WARN_ON(priv->num_stations < 0))
+ priv->num_stations = 0;
+
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ return iwl_send_remove_station(priv, addr, sta_id, false);
+out_err:
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return -EINVAL;
+}
+
+/**
+ * iwl_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void iwl_clear_ucode_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ int i;
+ unsigned long flags_spin;
+ bool cleared = false;
+
+ IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+ continue;
+
+ if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_INFO(priv,
+ "Clearing ucode active for station %d\n", i);
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+
+ if (!cleared)
+ IWL_DEBUG_INFO(priv,
+ "No active stations found to be cleared\n");
+}
+
+/**
+ * iwl_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in ucode, are
+ * restored.
+ *
+ * Function sleeps.
+ */
+void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ struct iwl_addsta_cmd sta_cmd;
+ struct iwl_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
+ bool send_lq;
+
+ if (!iwl_is_ready(priv->shrd)) {
+ IWL_DEBUG_INFO(priv,
+ "Not ready yet, not restoring any stations.\n");
+ return;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if (ctx->ctxid != priv->stations[i].ctxid)
+ continue;
+ if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+ !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].sta.mode = 0;
+ priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
+
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &priv->stations[i].sta,
+ sizeof(struct iwl_addsta_cmd));
+ send_lq = false;
+ if (priv->stations[i].lq) {
+ if (priv->shrd->wowlan)
+ iwl_sta_fill_lq(priv, ctx, i, &lq);
+ else
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+ send_lq = true;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
+ flags_spin);
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&priv->shrd->sta_lock,
+ flags_spin);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].used &=
+ ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[i].used &=
+ ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
+ flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (send_lq)
+ iwl_send_lq_cmd(priv, ctx, &lq,
+ CMD_SYNC, true);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ if (!found)
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
+ "no stations to be restored.\n");
+ else
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
+ "complete.\n");
+}
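
For orientation, a minimal sketch (an assumed, simplified call site, not taken from this patch) of how the two helpers above are typically paired around an RXON change; error handling is omitted.

/* Illustrative only: assumed, simplified call site for the helpers above. */
static void example_rxon_changed(struct iwl_priv *priv,
				 struct iwl_rxon_context *ctx)
{
	/* the uCode's station table is stale after the RXON change ... */
	iwl_clear_ucode_stations(priv, ctx);
	/* ... so push every station the driver still tracks back down */
	iwl_restore_stations(priv, ctx);
}
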
+
+void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ unsigned long flags;
+ int sta_id = ctx->ap_sta_id;
+ int ret;
+ struct iwl_addsta_cmd sta_cmd;
+ struct iwl_link_quality_cmd lq;
+ bool active;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return;
+ }
+
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
+ sta_cmd.mode = 0;
+ memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+
+ active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ if (active) {
+ ret = iwl_send_remove_station(
+ priv, priv->stations[sta_id].sta.sta.addr,
+ sta_id, true);
+ if (ret)
+ IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
+ priv->stations[sta_id].sta.sta.addr, ret);
+ }
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret)
+ IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
+ priv->stations[sta_id].sta.sta.addr, ret);
+ iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
+}
+
+int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->sta_key_max_num; i++)
+ if (!test_and_set_bit(i, &priv->ucode_key_table))
+ return i;
+
+ return WEP_INVALID_OFFSET;
+}
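
A hedged sketch of the intended pairing for this bitmap allocator: an offset handed out above is assumed to be returned with clear_bit() on the same priv->ucode_key_table when the corresponding key is removed. The release helper below is hypothetical and only illustrates that convention.

/* Hypothetical release helper; assumes ucode_key_table is the same bitmap. */
static void example_put_ucode_key_offset(struct iwl_priv *priv, int offset)
{
	if (offset != WEP_INVALID_OFFSET)
		clear_bit(offset, &priv->ucode_key_table);
}
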
+
+void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+ if (!(priv->stations[i].used & IWL_STA_BCAST))
+ continue;
+
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ priv->num_stations--;
+ if (WARN_ON(priv->num_stations < 0))
+ priv->num_stations = 0;
+ kfree(priv->stations[i].lq);
+ priv->stations[i].lq = NULL;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+static void iwl_dump_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
+{
+ int i;
+ IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
+ IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
+ lq->general_params.single_stream_ant_msk,
+ lq->general_params.dual_stream_ant_msk);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
+ i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that an HT rate has been in use and we then
+ * lose connectivity with the AP; mac80211 will first tell us that the
+ * current channel is no longer HT before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are no longer
+ * communicating over HT, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending an LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
+ */
+static bool is_lq_table_valid(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq)
+{
+ int i;
+
+ if (ctx->ht.enabled)
+ return true;
+
+ IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+ ctx->active.channel);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
+ RATE_MCS_HT_MSK) {
+ IWL_DEBUG_INFO(priv,
+ "index %d of LQ expects HT channel\n",
+ i);
+ return false;
+ }
+ }
+ return true;
+}
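
To make the guard concrete, a small sketch (values chosen purely for illustration) of the situation it rejects: the context has dropped out of HT while a stale LQ table still carries an MCS entry.

/* Illustrative: a single stale HT entry makes is_lq_table_valid() return
 * false once ctx->ht.enabled has been cleared. */
static bool example_stale_ht_lq(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx,
				struct iwl_link_quality_cmd *lq)
{
	lq->rs_table[0].rate_n_flags = cpu_to_le32(RATE_MCS_HT_MSK | 0x7);

	return is_lq_table_valid(priv, ctx, lq); /* false when !ctx->ht.enabled */
}
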
+
+/**
+ * iwl_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization, right
+ * after the station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * In the special case where init is set, the state indicating that
+ * station creation is in progress is cleared once the command
+ * completes.
+ */
+int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init)
+{
+ int ret = 0;
+ unsigned long flags_spin;
+
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_TX_LINK_QUALITY_CMD,
+ .len = { sizeof(struct iwl_link_quality_cmd), },
+ .flags = flags,
+ .data = { lq, },
+ };
+
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+
+ iwl_dump_lq_cmd(priv, lq);
+ if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
+ return -EINVAL;
+
+ if (is_lq_table_valid(priv, ctx, lq))
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ IWL_DEBUG_INFO(priv, "init LQ command complete, "
+ "clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ }
+ return ret;
+}
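
A minimal usage sketch, mirroring the restore path above: fill an LQ command for a freshly added station and send it synchronously with init set so the UCODE_INPROGRESS bit is cleared when the command completes. The caller is assumed to hold priv->shrd->mutex (iwl_sta_fill_lq() asserts it); error handling is omitted.

/* Illustrative only: initial LQ command for a newly added station. */
static int example_init_station_lq(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx, u8 sta_id)
{
	struct iwl_link_quality_cmd lq;

	iwl_sta_fill_lq(priv, ctx, sta_id, &lq);
	return iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
}
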
+
+int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
+ "station %pM\n", sta->addr);
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+ sta->addr);
+ ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
+ if (ret)
+ IWL_ERR(priv, "Error removing station %pM\n",
+ sta->addr);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
{
int i, r;
- struct iwl_link_quality_cmd *link_cmd;
u32 rate_flags = 0;
__le32 rate_n_flags;
- link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
- if (!link_cmd) {
- IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
- return NULL;
- }
+ lockdep_assert_held(&priv->shrd->mutex);
- lockdep_assert_held(&priv->mutex);
+ memset(link_cmd, 0, sizeof(*link_cmd));
/* Set up the rate scaling to start at selected rate, fall back
* all the way down to 1M in IEEE order, and then spin on 1M */
@@ -63,30 +871,46 @@ iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_CCK_MSK;
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
+ rate_flags |= first_antenna(hw_params(priv).valid_tx_ant) <<
RATE_MCS_ANT_POS;
rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
link_cmd->general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(hw_params(priv).valid_tx_ant);
link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~first_antenna(priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant &
+ ~first_antenna(hw_params(priv).valid_tx_ant);
if (!link_cmd->general_params.dual_stream_ant_msk) {
link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
+ hw_params(priv).valid_tx_ant;
}
- link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_dis_start_th =
+ LINK_QUAL_AGG_DISABLE_START_DEF;
link_cmd->agg_params.agg_time_limit =
cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
link_cmd->sta_id = sta_id;
+}
+
+static struct iwl_link_quality_cmd *
+iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ u8 sta_id)
+{
+ struct iwl_link_quality_cmd *link_cmd;
+
+ link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+
+ iwl_sta_fill_lq(priv, ctx, sta_id, link_cmd);
return link_cmd;
}
@@ -96,7 +920,8 @@ iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
*
* Function sleeps.
*/
-int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+int iwlagn_add_bssid_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
const u8 *addr, u8 *sta_id_r)
{
int ret;
@@ -116,14 +941,15 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
if (sta_id_r)
*sta_id_r = sta_id;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
/* Set up default rate scaling table in device's station table */
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (!link_cmd) {
- IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n",
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for station %pM.\n",
addr);
return -ENOMEM;
}
@@ -132,9 +958,9 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
if (ret)
IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
@@ -189,7 +1015,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
cmd.len[0] = cmd_size;
if (not_empty || send_if_empty)
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
else
return 0;
}
@@ -197,7 +1023,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
return iwl_send_static_wepkey_cmd(priv, ctx, false);
}
@@ -208,14 +1034,15 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
{
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
+ if (iwl_is_rfkill(priv->shrd)) {
+ IWL_DEBUG_WEP(priv,
+ "Not sending REPLY_WEPKEY command due to RFKILL.\n");
/* but keys in device are clear anyway so return success */
return 0;
}
@@ -232,11 +1059,12 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
{
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
if (keyconf->keylen != WEP_KEY_LEN_128 &&
keyconf->keylen != WEP_KEY_LEN_64) {
- IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
+ IWL_DEBUG_WEP(priv,
+ "Bad WEP key length %d\n", keyconf->keylen);
return -EINVAL;
}
@@ -311,9 +1139,9 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
struct iwl_addsta_cmd sta_cmd;
int i;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
@@ -388,16 +1216,16 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
if (sta_id == IWL_INVALID_STATION)
return -ENOENT;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
sta_id = IWL_INVALID_STATION;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
if (sta_id == IWL_INVALID_STATION)
return 0;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
ctx->key_mapping_keys--;
@@ -430,7 +1258,7 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
@@ -493,18 +1321,18 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
unsigned long flags;
u8 sta_id;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return -EINVAL;
}
priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (!link_cmd) {
@@ -513,9 +1341,9 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
return -ENOMEM;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
@@ -539,13 +1367,13 @@ int iwl_update_bcast_station(struct iwl_priv *priv,
return -ENOMEM;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
if (priv->stations[sta_id].lq)
kfree(priv->stations[sta_id].lq);
else
IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
@@ -572,15 +1400,15 @@ int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
unsigned long flags;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* Remove "disable" flag, to enable Tx for this TID */
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -592,20 +1420,20 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
int sta_id;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION)
return -ENXIO;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -617,7 +1445,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int sta_id;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
@@ -625,13 +1453,13 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
return -ENXIO;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -640,14 +1468,14 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
{
unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.sta.modify_mask = 0;
priv->stations[sta_id].sta.sleep_tx_count = 0;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
@@ -655,7 +1483,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.sta.modify_mask =
@@ -663,7 +1491,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
@@ -676,6 +1504,8 @@ void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
int sta_id;
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
switch (cmd) {
case STA_NOTIFY_SLEEP:
WARN_ON(!sta_priv->client);
@@ -695,4 +1525,5 @@ void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
default:
break;
}
+ IWL_DEBUG_MAC80211(priv, "leave\n");
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index f501d742984..c27180a7335 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -114,9 +114,6 @@ static bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
s32 temp = priv->temperature; /* degrees CELSIUS except specified */
bool within_margin = false;
- if (priv->cfg->base_params->temperature_kelvin)
- temp = KELVIN_TO_CELSIUS(priv->temperature);
-
if (!priv->thermal_throttle.advanced_tt)
within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
CT_KILL_THRESHOLD_LEGACY) ? true : false;
@@ -176,24 +173,24 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
unsigned long flags;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
if (tt->state == IWL_TI_CT_KILL) {
if (priv->thermal_throttle.ct_kill_toggle) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
priv->thermal_throttle.ct_kill_toggle = false;
} else {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
priv->thermal_throttle.ct_kill_toggle = true;
}
- iwl_read32(priv, CSR_UCODE_DRV_GP1);
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv))
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ iwl_read32(bus(priv), CSR_UCODE_DRV_GP1);
+ spin_lock_irqsave(&bus(priv)->reg_lock, flags);
+ if (!iwl_grab_nic_access(bus(priv)))
+ iwl_release_nic_access(bus(priv));
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a
@@ -227,7 +224,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
struct iwl_priv *priv = (struct iwl_priv *)data;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
/* temperature timer expired, ready to go into CT_KILL state */
@@ -235,7 +232,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
"temperature timer expired\n");
tt->state = IWL_TI_CT_KILL;
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL, &priv->shrd->status);
iwl_perform_ct_kill_task(priv, true);
}
}
@@ -313,23 +310,24 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
tt->tt_power_mode = IWL_POWER_INDEX_5;
break;
}
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (old_state == IWL_TI_CT_KILL)
- clear_bit(STATUS_CT_KILL, &priv->status);
+ clear_bit(STATUS_CT_KILL, &priv->shrd->status);
if (tt->state != IWL_TI_CT_KILL &&
iwl_power_update_mode(priv, true)) {
/* TT state not updated
* try again during next temperature read
*/
if (old_state == IWL_TI_CT_KILL)
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL, &priv->shrd->status);
tt->state = old_state;
IWL_ERR(priv, "Cannot update power mode, "
"TT state not updated\n");
} else {
if (tt->state == IWL_TI_CT_KILL) {
if (force) {
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL,
+ &priv->shrd->status);
iwl_perform_ct_kill_task(priv, true);
} else {
iwl_prepare_ct_kill_task(priv);
@@ -343,7 +341,7 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
tt->tt_power_mode);
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
}
@@ -453,9 +451,9 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
* in case get disabled before */
iwl_set_rxon_ht(priv, &priv->current_ht_config);
}
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (old_state == IWL_TI_CT_KILL)
- clear_bit(STATUS_CT_KILL, &priv->status);
+ clear_bit(STATUS_CT_KILL, &priv->shrd->status);
if (tt->state != IWL_TI_CT_KILL &&
iwl_power_update_mode(priv, true)) {
/* TT state not updated
@@ -464,7 +462,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
IWL_ERR(priv, "Cannot update power mode, "
"TT state not updated\n");
if (old_state == IWL_TI_CT_KILL)
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL, &priv->shrd->status);
tt->state = old_state;
} else {
IWL_DEBUG_TEMP(priv,
@@ -475,7 +473,8 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
if (force) {
IWL_DEBUG_TEMP(priv,
"Enter IWL_TI_CT_KILL\n");
- set_bit(STATUS_CT_KILL, &priv->status);
+ set_bit(STATUS_CT_KILL,
+ &priv->shrd->status);
iwl_perform_ct_kill_task(priv, true);
} else {
iwl_prepare_ct_kill_task(priv);
@@ -487,7 +486,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
iwl_perform_ct_kill_task(priv, false);
}
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
}
@@ -506,10 +505,10 @@ static void iwl_bg_ct_enter(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (!iwl_is_ready(priv))
+ if (!iwl_is_ready(priv->shrd))
return;
if (tt->state != IWL_TI_CT_KILL) {
@@ -535,10 +534,10 @@ static void iwl_bg_ct_exit(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (!iwl_is_ready(priv))
+ if (!iwl_is_ready(priv->shrd))
return;
/* stop ct_kill_exit_tm timer */
@@ -565,20 +564,20 @@ static void iwl_bg_ct_exit(struct work_struct *work)
void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
- queue_work(priv->workqueue, &priv->ct_enter);
+ queue_work(priv->shrd->workqueue, &priv->ct_enter);
}
void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
- queue_work(priv->workqueue, &priv->ct_exit);
+ queue_work(priv->shrd->workqueue, &priv->ct_exit);
}
static void iwl_bg_tt_work(struct work_struct *work)
@@ -586,12 +585,9 @@ static void iwl_bg_tt_work(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
s32 temp = priv->temperature; /* degrees CELSIUS except specified */
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (priv->cfg->base_params->temperature_kelvin)
- temp = KELVIN_TO_CELSIUS(priv->temperature);
-
if (!priv->thermal_throttle.advanced_tt)
iwl_legacy_tt_handler(priv, temp, false);
else
@@ -600,11 +596,11 @@ static void iwl_bg_tt_work(struct work_struct *work)
void iwl_tt_handler(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
- queue_work(priv->workqueue, &priv->tt_work);
+ queue_work(priv->shrd->workqueue, &priv->tt_work);
}
/* Thermal throttling initialization
@@ -639,11 +635,13 @@ void iwl_tt_initialize(struct iwl_priv *priv)
if (priv->cfg->base_params->adv_thermal_throttle) {
IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
- tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
- IWL_TI_STATE_MAX, GFP_KERNEL);
- tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
- IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1),
- GFP_KERNEL);
+ tt->restriction = kcalloc(IWL_TI_STATE_MAX,
+ sizeof(struct iwl_tt_restriction),
+ GFP_KERNEL);
+ tt->transaction = kcalloc(IWL_TI_STATE_MAX *
+ (IWL_TI_STATE_MAX - 1),
+ sizeof(struct iwl_tt_trans),
+ GFP_KERNEL);
if (!tt->restriction || !tt->transaction) {
IWL_ERR(priv, "Fallback to Legacy Throttling\n");
priv->thermal_throttle.advanced_tt = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
index d118ed29bf3..7282a23e8f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
@@ -117,7 +117,6 @@ struct iwl_tt_mgmt {
u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
bool iwl_ht_enabled(struct iwl_priv *priv);
-bool iwl_check_for_ct_kill(struct iwl_priv *priv);
enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 53bb59ee719..35a6b71f358 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -31,89 +31,15 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/ieee80211.h>
#include "iwl-dev.h"
#include "iwl-core.h"
-#include "iwl-sta.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get as from it
- * by way of skb_get_queue_mapping(skb):
- *
- * VO 0
- * VI 1
- * BE 2
- * BK 3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
- IEEE80211_AC_BE,
- IEEE80211_AC_BK,
- IEEE80211_AC_BK,
- IEEE80211_AC_BE,
- IEEE80211_AC_VI,
- IEEE80211_AC_VI,
- IEEE80211_AC_VO,
- IEEE80211_AC_VO
-};
-
-static inline int get_ac_from_tid(u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return tid_to_ac[tid];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return ctx->ac_to_fifo[tid_to_ac[tid]];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
- int tid)
-{
- if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
- IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- /* Modify device's station table to Tx this TID */
- return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
-}
-
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
struct ieee80211_tx_info *info,
__le16 fc, __le32 *tx_flags)
@@ -128,11 +54,10 @@ static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
* handle build REPLY_TX command notification.
*/
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- u8 std_id)
+ struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 sta_id)
{
__le16 fc = hdr->frame_control;
__le32 tx_flags = tx_cmd->tx_flags;
@@ -157,7 +82,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_flags |= TX_CMD_FLG_IGNORE_BT;
- tx_cmd->sta_id = std_id;
+ tx_cmd->sta_id = sta_id;
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
@@ -186,12 +111,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_cmd->next_frame_len = 0;
}
-#define RTS_DFAULT_RETRY_LIMIT 60
-
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- __le16 fc)
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ __le16 fc)
{
u32 rate_flags;
int rate_idx;
@@ -199,17 +122,25 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
u8 data_retry_limit;
u8 rate_plcp;
- /* Set retry limit on DATA packets and Probe Responses*/
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
+ if (priv->shrd->wowlan) {
+ rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
+ data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
+ } else {
+ /* Set retry limit on RTS packets */
+ rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;
+
+ /* Set retry limit on DATA packets and Probe Responses*/
+ if (ieee80211_is_probe_resp(fc)) {
+ data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
+ rts_retry_limit =
+ min(data_retry_limit, rts_retry_limit);
+ } else if (ieee80211_is_back_req(fc))
+ data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
+ else
+ data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
+ }
- /* Set retry limit on RTS packets */
- rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
+ tx_cmd->data_retry_limit = data_retry_limit;
tx_cmd->rts_retry_limit = rts_retry_limit;
/* DATA packets will use the uCode station table for rate/antenna
@@ -261,10 +192,10 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- first_antenna(priv->hw_params.valid_tx_ant));
+ first_antenna(hw_params(priv).valid_tx_ant));
} else
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant);
rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* Set the rate in the TX cmd */
@@ -322,30 +253,21 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_station_priv *sta_priv = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_device_cmd *dev_cmd = NULL;
struct iwl_tx_cmd *tx_cmd;
- int txq_id;
- u16 seq_number = 0;
__le16 fc;
u8 hdr_len;
u16 len;
u8 sta_id;
- u8 tid = 0;
unsigned long flags;
bool is_agg = false;
- /*
- * If the frame needs to go out off-channel, then
- * we'll have put the PAN context to that channel,
- * so make the frame go out there.
- */
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- else if (info->control.vif)
+ if (info->control.vif)
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_is_rfkill(priv)) {
+ spin_lock_irqsave(&priv->shrd->lock, flags);
+ if (iwl_is_rfkill(priv->shrd)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
goto drop_unlock_priv;
}
@@ -382,7 +304,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
sta_priv = (void *)info->control.sta->drv_priv;
if (sta_priv && sta_priv->asleep &&
- (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
+ (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
/*
* This sends an asynchronous command to the device,
* but we can rely on it being processed before the
@@ -395,52 +317,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
}
- /*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
- txq_id = ctx->mcast_queue;
- /*
- * The microcode will clear the more data
- * bit in the last frame it transmits.
- */
- hdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ is_agg = true;
- /* irqs already disabled/saved above when locking priv->lock */
- spin_lock(&priv->sta_lock);
+ /* irqs already disabled/saved above when locking priv->shrd->lock */
+ spin_lock(&priv->shrd->sta_lock);
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = NULL;
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-
- if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
- goto drop_unlock_sta;
-
- seq_number = priv->stations[sta_id].tid[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
- txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- is_agg = true;
- }
- }
+ dev_cmd = kmem_cache_alloc(priv->tx_cmd_pool, GFP_ATOMIC);
- tx_cmd = trans_get_tx_cmd(&priv->trans, txq_id);
- if (unlikely(!tx_cmd))
+ if (unlikely(!dev_cmd))
goto drop_unlock_sta;
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
+ memset(dev_cmd, 0, sizeof(*dev_cmd));
+ tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
/* Total # bytes to be transmitted */
len = (u16)skb->len;
@@ -457,17 +346,16 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
iwl_update_stats(priv, true, fc, len);
- if (trans_tx(&priv->trans, skb, tx_cmd, txq_id, fc, is_agg, ctx))
- goto drop_unlock_sta;
+ memset(&info->status, 0, sizeof(info->status));
- if (ieee80211_is_data_qos(fc)) {
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- }
+ info->driver_data[0] = ctx;
+ info->driver_data[1] = dev_cmd;
+
+ if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id))
+ goto drop_unlock_sta;
- spin_unlock(&priv->sta_lock);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->shrd->sta_lock);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
/*
* Avoid atomic ops if it isn't an associated client.
@@ -482,41 +370,20 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
return 0;
drop_unlock_sta:
- spin_unlock(&priv->sta_lock);
+ if (dev_cmd)
+ kmem_cache_free(priv->tx_cmd_pool, dev_cmd);
+ spin_unlock(&priv->shrd->sta_lock);
drop_unlock_priv:
- spin_unlock_irqrestore(&priv->lock, flags);
- return -1;
-}
-
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
return -1;
}
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
int sta_id;
- int tx_fifo;
- int txq_id;
int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo < 0))
- return tx_fifo;
IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
sta->addr, tid);
@@ -526,58 +393,29 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
IWL_ERR(priv, "Start AGG on invalid station\n");
return -ENXIO;
}
- if (unlikely(tid >= MAX_TID_COUNT))
+ if (unlikely(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+ if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
return -ENXIO;
}
- txq_id = iwlagn_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- tid_data->agg.tx_fifo = tx_fifo;
- iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
+ ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
if (ret)
return ret;
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- } else {
- IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id,
+ tid, ssn);
+
return ret;
}
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
- int tx_fifo_id, txq_id, sta_id, ssn;
- struct iwl_tid_data *tid_data;
- int write_ptr, read_ptr;
- unsigned long flags;
-
- tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo_id < 0))
- return tx_fifo_id;
+ int sta_id;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
sta_id = iwl_sta_id(sta);
@@ -586,101 +424,8 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
return -ENXIO;
}
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
- txq_id = tid_data->agg.txq_id;
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
- }
-
- write_ptr = priv->txq[txq_id].q.write_ptr;
- read_ptr = priv->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
- priv->stations[sta_id].tid[tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- turn_off:
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
- /* do not restore/save irqs */
- spin_unlock(&priv->sta_lock);
- spin_lock(&priv->lock);
-
- /*
- * the only reason this call can fail is queue number out of range,
- * which can happen if uCode is reloaded and all the station
- * information are lost. if it is outside the range, there is no need
- * to deactivate the uCode queue, just return "success" to allow
- * mac80211 to clean up it own data.
- */
- trans_txq_agg_disable(&priv->trans, txq_id, ssn, tx_fifo_id);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-
- return 0;
-}
-
-int iwlagn_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id)
-{
- struct iwl_queue *q = &priv->txq[txq_id].q;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
- struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
- struct iwl_rxon_context *ctx;
-
- ctx = &priv->contexts[priv->stations[sta_id].ctxid];
-
- lockdep_assert_held(&priv->sta_lock);
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- u16 ssn = SEQ_TO_SN(tid_data->seq_number);
- int tx_fifo = get_fifo_from_tid(ctx, tid);
- IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
- trans_txq_agg_disable(&priv->trans, txq_id,
- ssn, tx_fifo);
- tid_data->agg.state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- }
-
- return 0;
+ return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid,
+ sta_id, tid);
}
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
@@ -702,147 +447,394 @@ static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
rcu_read_unlock();
}
-static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
- bool is_agg)
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
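
As a worked example of this translation (the flag combination is chosen only for illustration): an HT frame sent with a short guard interval on a 40 MHz channel maps as sketched below.

/* Illustrative: HT + SGI + HT40 uCode bits become the matching mac80211
 * rate-control flags. */
static void example_rate_translation(struct iwl_priv *priv,
				     struct ieee80211_tx_info *info)
{
	u32 rate_n_flags = RATE_MCS_HT_MSK | RATE_MCS_SGI_MSK |
			   RATE_MCS_HT40_MSK;

	iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
	/* info->control.rates[0].flags now includes IEEE80211_TX_RC_MCS,
	 * IEEE80211_TX_RC_SHORT_GI and IEEE80211_TX_RC_40_MHZ_WIDTH. */
}
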
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_get_tx_fail_reason(u32 status)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(BT_PRIO);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(PASSIVE_NO_RX);
+ TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+ }
- if (!is_agg)
- iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
+ return "UNKNOWN";
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
}
+#endif /* CONFIG_IWLWIFI_DEBUG */
-int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
- int nfreed = 0;
- struct ieee80211_hdr *hdr;
+ status &= AGG_TX_STATUS_MSK;
- if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
- IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
- "index %d is out of range [0-%d] %d %d.\n", __func__,
- txq_id, index, q->n_bd, q->write_ptr, q->read_ptr);
- return 0;
+ switch (status) {
+ case AGG_TX_STATE_UNDERRUN_MSK:
+ priv->reply_agg_tx_stats.underrun++;
+ break;
+ case AGG_TX_STATE_BT_PRIO_MSK:
+ priv->reply_agg_tx_stats.bt_prio++;
+ break;
+ case AGG_TX_STATE_FEW_BYTES_MSK:
+ priv->reply_agg_tx_stats.few_bytes++;
+ break;
+ case AGG_TX_STATE_ABORT_MSK:
+ priv->reply_agg_tx_stats.abort++;
+ break;
+ case AGG_TX_STATE_LAST_SENT_TTL_MSK:
+ priv->reply_agg_tx_stats.last_sent_ttl++;
+ break;
+ case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
+ priv->reply_agg_tx_stats.last_sent_try++;
+ break;
+ case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
+ priv->reply_agg_tx_stats.last_sent_bt_kill++;
+ break;
+ case AGG_TX_STATE_SCD_QUERY_MSK:
+ priv->reply_agg_tx_stats.scd_query++;
+ break;
+ case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
+ priv->reply_agg_tx_stats.bad_crc32++;
+ break;
+ case AGG_TX_STATE_RESPONSE_MSK:
+ priv->reply_agg_tx_stats.response++;
+ break;
+ case AGG_TX_STATE_DUMP_TX_MSK:
+ priv->reply_agg_tx_stats.dump_tx++;
+ break;
+ case AGG_TX_STATE_DELAY_TX_MSK:
+ priv->reply_agg_tx_stats.delay_tx++;
+ break;
+ default:
+ priv->reply_agg_tx_stats.unknown++;
+ break;
}
+}
+
+static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
+ struct iwlagn_tx_resp *tx_resp)
+{
+ struct agg_tx_status *frame_status = &tx_resp->status;
+ int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
+ IWLAGN_TX_RES_TID_POS;
+ int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
+ IWLAGN_TX_RES_RA_POS;
+ struct iwl_ht_agg *agg = &priv->shrd->tid_data[sta_id][tid].agg;
+ u32 status = le16_to_cpu(tx_resp->status.status);
+ int i;
+
+ if (agg->wait_for_ba)
+ IWL_DEBUG_TX_REPLY(priv,
+ "got tx response w/o block-ack\n");
- for (index = iwl_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+ agg->wait_for_ba = (tx_resp->frame_count > 1);
- tx_info = &txq->txb[txq->q.read_ptr];
+ /*
+ * If the BT kill count is non-zero, we'll get this
+ * notification again.
+ */
+ if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
+ priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist) {
+ IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
+ }
- if (WARN_ON_ONCE(tx_info->skb == NULL))
- continue;
+ if (tx_resp->frame_count == 1)
+ return;
- hdr = (struct ieee80211_hdr *)tx_info->skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control))
- nfreed++;
+ /* Construct bit-map of pending frames within Tx window */
+ for (i = 0; i < tx_resp->frame_count; i++) {
+ u16 fstatus = le16_to_cpu(frame_status[i].status);
- iwlagn_tx_status(priv, tx_info,
- txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
- tx_info->skb = NULL;
+ if (status & AGG_TX_STATUS_MSK)
+ iwlagn_count_agg_tx_err_status(priv, fstatus);
- iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
+ if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
+ AGG_TX_STATE_ABORT_MSK))
+ continue;
- iwlagn_txq_free_tfd(priv, txq, txq->q.read_ptr);
+ IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
+ "try-count (0x%08x)\n",
+ iwl_get_agg_tx_fail_reason(fstatus),
+ fstatus & AGG_TX_STATUS_MSK,
+ fstatus & AGG_TX_TRY_MSK);
}
- return nfreed;
}
-/**
- * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl_compressed_ba_resp *ba_resp)
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
+const char *iwl_get_agg_tx_fail_reason(u16 status)
{
- int sh;
- u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- struct ieee80211_tx_info *info;
- u64 bitmap, sent_bitmap;
-
- if (unlikely(!agg->wait_for_ba)) {
- if (unlikely(ba_resp->bitmap))
- IWL_ERR(priv, "Received BA when not expected\n");
- return -EINVAL;
+ status &= AGG_TX_STATUS_MSK;
+ switch (status) {
+ case AGG_TX_STATE_TRANSMITTED:
+ return "SUCCESS";
+ AGG_TX_STATE_FAIL(UNDERRUN_MSK);
+ AGG_TX_STATE_FAIL(BT_PRIO_MSK);
+ AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
+ AGG_TX_STATE_FAIL(ABORT_MSK);
+ AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
+ AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
+ AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
+ AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
+ AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
+ AGG_TX_STATE_FAIL(RESPONSE_MSK);
+ AGG_TX_STATE_FAIL(DUMP_TX_MSK);
+ AGG_TX_STATE_FAIL(DELAY_TX_MSK);
}
- /* Mark that the expected block-ack response arrived */
- agg->wait_for_ba = 0;
- IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
-
- /* Calculate shift to align block-ack bits with our Tx window bits */
- sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
- if (sh < 0)
- sh += 0x100;
+ return "UNKNOWN";
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
- /*
- * Check for success or failure according to the
- * transmitted bitmap and block-ack bitmap
- */
- bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
- sent_bitmap = bitmap & agg->bitmap;
+static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)&tx_resp->status +
+ tx_resp->frame_count) & MAX_SN;
+}
- /* Sanity check values reported by uCode */
- if (ba_resp->txed_2_done > ba_resp->txed) {
- IWL_DEBUG_TX_REPLY(priv,
- "bogus sent(%d) and ack(%d) count\n",
- ba_resp->txed, ba_resp->txed_2_done);
- /*
- * set txed_2_done = txed,
- * so it won't impact rate scale
- */
- ba_resp->txed = ba_resp->txed_2_done;
- }
- IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
- ba_resp->txed, ba_resp->txed_2_done);
+static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
+{
+ status &= TX_STATUS_MSK;
- /* Find the first ACKed frame to store the TX status */
- while (sent_bitmap && !(sent_bitmap & 1)) {
- agg->start_idx = (agg->start_idx + 1) & 0xff;
- sent_bitmap >>= 1;
+ switch (status) {
+ case TX_STATUS_POSTPONE_DELAY:
+ priv->reply_tx_stats.pp_delay++;
+ break;
+ case TX_STATUS_POSTPONE_FEW_BYTES:
+ priv->reply_tx_stats.pp_few_bytes++;
+ break;
+ case TX_STATUS_POSTPONE_BT_PRIO:
+ priv->reply_tx_stats.pp_bt_prio++;
+ break;
+ case TX_STATUS_POSTPONE_QUIET_PERIOD:
+ priv->reply_tx_stats.pp_quiet_period++;
+ break;
+ case TX_STATUS_POSTPONE_CALC_TTAK:
+ priv->reply_tx_stats.pp_calc_ttak++;
+ break;
+ case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
+ priv->reply_tx_stats.int_crossed_retry++;
+ break;
+ case TX_STATUS_FAIL_SHORT_LIMIT:
+ priv->reply_tx_stats.short_limit++;
+ break;
+ case TX_STATUS_FAIL_LONG_LIMIT:
+ priv->reply_tx_stats.long_limit++;
+ break;
+ case TX_STATUS_FAIL_FIFO_UNDERRUN:
+ priv->reply_tx_stats.fifo_underrun++;
+ break;
+ case TX_STATUS_FAIL_DRAIN_FLOW:
+ priv->reply_tx_stats.drain_flow++;
+ break;
+ case TX_STATUS_FAIL_RFKILL_FLUSH:
+ priv->reply_tx_stats.rfkill_flush++;
+ break;
+ case TX_STATUS_FAIL_LIFE_EXPIRE:
+ priv->reply_tx_stats.life_expire++;
+ break;
+ case TX_STATUS_FAIL_DEST_PS:
+ priv->reply_tx_stats.dest_ps++;
+ break;
+ case TX_STATUS_FAIL_HOST_ABORTED:
+ priv->reply_tx_stats.host_abort++;
+ break;
+ case TX_STATUS_FAIL_BT_RETRY:
+ priv->reply_tx_stats.bt_retry++;
+ break;
+ case TX_STATUS_FAIL_STA_INVALID:
+ priv->reply_tx_stats.sta_invalid++;
+ break;
+ case TX_STATUS_FAIL_FRAG_DROPPED:
+ priv->reply_tx_stats.frag_drop++;
+ break;
+ case TX_STATUS_FAIL_TID_DISABLE:
+ priv->reply_tx_stats.tid_disable++;
+ break;
+ case TX_STATUS_FAIL_FIFO_FLUSHED:
+ priv->reply_tx_stats.fifo_flush++;
+ break;
+ case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
+ priv->reply_tx_stats.insuff_cf_poll++;
+ break;
+ case TX_STATUS_FAIL_PASSIVE_NO_RX:
+ priv->reply_tx_stats.fail_hw_drop++;
+ break;
+ case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
+ priv->reply_tx_stats.sta_color_mismatch++;
+ break;
+ default:
+ priv->reply_tx_stats.unknown++;
+ break;
}
+}
- info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_len = ba_resp->txed_2_done;
- info->status.ampdu_len = ba_resp->txed;
- iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+static void iwlagn_set_tx_status(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ struct iwlagn_tx_resp *tx_resp,
+ bool is_agg)
+{
+ u16 status = le16_to_cpu(tx_resp->status.status);
+
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ if (is_agg)
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= iwl_tx_status_to_mac80211(status);
+ iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
+ info);
+ if (!iwl_is_tx_success(status))
+ iwlagn_count_tx_err_status(priv, status);
+}
- return 0;
+static void iwl_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IWL_ERR(priv, "Tx flush command to flush out all frames\n");
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ queue_work(priv->shrd->workqueue, &priv->tx_flush);
+ }
}
-/**
- * translate ucode response to mac80211 tx status control values
- */
-void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info)
+int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
- struct ieee80211_tx_rate *r = &info->control.rates[0];
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
+ struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ struct ieee80211_hdr *hdr;
+ u32 status = le16_to_cpu(tx_resp->status.status);
+ u32 ssn = iwlagn_get_scd_ssn(tx_resp);
+ int tid;
+ int sta_id;
+ int freed;
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+ struct sk_buff_head skbs;
+ struct sk_buff *skb;
+ struct iwl_rxon_context *ctx;
+ bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
+
+ tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
+ IWLAGN_TX_RES_TID_POS;
+ sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
+ IWLAGN_TX_RES_RA_POS;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ if (is_agg)
+ iwl_rx_reply_tx_agg(priv, tx_resp);
+
+ if (tx_resp->frame_count == 1) {
+ __skb_queue_head_init(&skbs);
+		/* we can free until ssn % q.n_bd, not inclusive */
+ iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
+ ssn, status, &skbs);
+ freed = 0;
+ while (!skb_queue_empty(&skbs)) {
+ skb = __skb_dequeue(&skbs);
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ priv->last_seq_ctl = tx_resp->seq_ctl;
+
+ info = IEEE80211_SKB_CB(skb);
+ ctx = info->driver_data[0];
+ kmem_cache_free(priv->tx_cmd_pool,
+ (info->driver_data[1]));
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
+ iwl_is_associated_ctx(ctx) && ctx->vif &&
+ ctx->vif->type == NL80211_IFTYPE_STATION) {
+ ctx->last_tx_rejected = true;
+ iwl_trans_stop_queue(trans(priv), txq_id);
+
+ IWL_DEBUG_TX_REPLY(priv,
+ "TXQ %d status %s (0x%08x) "
+ "rate_n_flags 0x%x retries %d\n",
+ txq_id,
+ iwl_get_tx_fail_reason(status),
+ status,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame);
+
+ IWL_DEBUG_TX_REPLY(priv,
+ "FrameCnt = %d, idx=%d\n",
+ tx_resp->frame_count, cmd_index);
+ }
+
+ /* check if BAR is needed */
+ if (is_agg && !iwl_is_tx_success(status))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
+ tx_resp, is_agg);
+ if (!is_agg)
+ iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
+
+ ieee80211_tx_status_irqsafe(priv->hw, skb);
+
+ freed++;
+ }
- info->antenna_sel_tx =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
- if (rate_n_flags & RATE_MCS_HT_MSK)
- r->flags |= IEEE80211_TX_RC_MCS;
- if (rate_n_flags & RATE_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- r->flags |= IEEE80211_TX_RC_DUP_DATA;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+ WARN_ON(!is_agg && freed != 1);
+ }
+
+ iwl_check_abort_status(priv, tx_resp->frame_count, status);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
}
/**
@@ -851,17 +843,21 @@ void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
* Handles block-acknowledge notification from device, which reports success
* of frames sent via aggregation.
*/
-void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
- struct iwl_tx_queue *txq = NULL;
struct iwl_ht_agg *agg;
- int index;
+ struct sk_buff_head reclaimed_skbs;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ unsigned long flags;
int sta_id;
int tid;
- unsigned long flags;
+ int freed;
/* "flow" corresponds to Tx queue */
u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
@@ -870,16 +866,18 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
- if (scd_flow >= priv->hw_params.max_txq_num) {
+ if (scd_flow >= hw_params(priv).max_txq_num) {
IWL_ERR(priv,
"BUG_ON scd_flow is bigger than number of queues\n");
- return;
+ return 0;
}
- txq = &priv->txq[scd_flow];
sta_id = ba_resp->sta_id;
tid = ba_resp->tid;
- agg = &priv->stations[sta_id].tid[tid].agg;
+ agg = &priv->shrd->tid_data[sta_id][tid].agg;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
if (unlikely(agg->txq_id != scd_flow)) {
/*
* FIXME: this is a uCode bug which needs to be addressed,
@@ -890,88 +888,84 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv,
"BA scd_flow %d does not match txq_id %d\n",
scd_flow, agg->txq_id);
- return;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
}
- /* Find index just before block-ack window */
- index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IWL_ERR(priv, "Received BA when not expected\n");
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
+ }
IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
"sta_id = %d\n",
agg->wait_for_ba,
(u8 *) &ba_resp->sta_addr_lo32,
ba_resp->sta_id);
- IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
- "%d, scd_ssn = %d\n",
+ IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
+ "scd_flow = %d, scd_ssn = %d\n",
ba_resp->tid,
ba_resp->seq_ctl,
(unsigned long long)le64_to_cpu(ba_resp->bitmap),
ba_resp->scd_flow,
ba_resp->scd_ssn);
- IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
- agg->start_idx,
- (unsigned long long)agg->bitmap);
- /* Update driver's record of ACK vs. not for each frame in window */
- iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = false;
+
+ /* Sanity check values reported by uCode */
+ if (ba_resp->txed_2_done > ba_resp->txed) {
+ IWL_DEBUG_TX_REPLY(priv,
+ "bogus sent(%d) and ack(%d) count\n",
+ ba_resp->txed, ba_resp->txed_2_done);
+ /*
+		 * set txed = txed_2_done, so the bogus
+		 * counts won't impact rate scaling
+ */
+ ba_resp->txed = ba_resp->txed_2_done;
+ }
+ IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
+ ba_resp->txed, ba_resp->txed_2_done);
+
+ __skb_queue_head_init(&reclaimed_skbs);
/* Release all TFDs before the SSN, i.e. all TFDs in front of
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway). */
- if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
- /* calculate mac80211 ampdu sw queue to wake */
- int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- priv->mac80211_registered &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_wake_queue(priv, txq);
-
- iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
- }
+ iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn,
+ 0, &reclaimed_skbs);
+ freed = 0;
+ while (!skb_queue_empty(&reclaimed_skbs)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
+ skb = __skb_dequeue(&reclaimed_skbs);
+ hdr = (struct ieee80211_hdr *)skb->data;
-#ifdef CONFIG_IWLWIFI_DEBUG
-const char *iwl_get_tx_fail_reason(u32 status)
-{
-#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
-#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(1);
+
+ info = IEEE80211_SKB_CB(skb);
+ kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));
+
+ if (freed == 1) {
+ /* this is the first skb we deliver in this batch */
+ /* put the rate scaling data there */
+ info = IEEE80211_SKB_CB(skb);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = ba_resp->txed_2_done;
+ info->status.ampdu_len = ba_resp->txed;
+ iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
+ info);
+ }
- switch (status & TX_STATUS_MSK) {
- case TX_STATUS_SUCCESS:
- return "SUCCESS";
- TX_STATUS_POSTPONE(DELAY);
- TX_STATUS_POSTPONE(FEW_BYTES);
- TX_STATUS_POSTPONE(BT_PRIO);
- TX_STATUS_POSTPONE(QUIET_PERIOD);
- TX_STATUS_POSTPONE(CALC_TTAK);
- TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
- TX_STATUS_FAIL(SHORT_LIMIT);
- TX_STATUS_FAIL(LONG_LIMIT);
- TX_STATUS_FAIL(FIFO_UNDERRUN);
- TX_STATUS_FAIL(DRAIN_FLOW);
- TX_STATUS_FAIL(RFKILL_FLUSH);
- TX_STATUS_FAIL(LIFE_EXPIRE);
- TX_STATUS_FAIL(DEST_PS);
- TX_STATUS_FAIL(HOST_ABORTED);
- TX_STATUS_FAIL(BT_RETRY);
- TX_STATUS_FAIL(STA_INVALID);
- TX_STATUS_FAIL(FRAG_DROPPED);
- TX_STATUS_FAIL(TID_DISABLE);
- TX_STATUS_FAIL(FIFO_FLUSHED);
- TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
- TX_STATUS_FAIL(PASSIVE_NO_RX);
- TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+ ieee80211_tx_status_irqsafe(priv->hw, skb);
}
- return "UNKNOWN";
-
-#undef TX_STATUS_FAIL
-#undef TX_STATUS_POSTPONE
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
}
-#endif /* CONFIG_IWLWIFI_DEBUG */
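
The reworked block-ack path above reclaims every frame up to the reported scd_ssn and attaches the aggregate counters (txed / txed_2_done) to the first QoS data frame in the reclaimed batch. The following is a minimal, standalone C sketch of that accounting; all values and field names are made up for illustration and none of the driver's structures are used.

/* Sketch: sanitize the BA counters the way the handler above does, then
 * mark only the first reclaimed frame with the aggregate status. */
#include <stdio.h>

struct ba_counts {
	unsigned int txed;		/* frames the uCode reports as sent */
	unsigned int txed_2_done;	/* frames covered by the block-ack  */
};

static void sanitize_ba_counts(struct ba_counts *ba)
{
	/* An ack count larger than the sent count is bogus, so force the
	 * two equal to keep rate scaling sane (mirrors the check above). */
	if (ba->txed_2_done > ba->txed)
		ba->txed = ba->txed_2_done;
}

int main(void)
{
	struct ba_counts ba = { .txed = 3, .txed_2_done = 5 };	/* bogus input */
	int freed, nframes = 5;

	sanitize_ba_counts(&ba);
	printf("sent %u acked %u\n", ba.txed, ba.txed_2_done);

	/* Only the first frame of the batch carries the AMPDU status;
	 * the rest are simply freed. */
	for (freed = 1; freed <= nframes; freed++) {
		if (freed == 1)
			printf("frame %d: ampdu_len=%u ampdu_ack_len=%u\n",
			       freed, ba.txed, ba.txed_2_done);
		else
			printf("frame %d: freed only\n", freed);
	}
	return 0;
}
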
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 56211006a18..8ba0dd54e37 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -35,11 +35,11 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-agn-calib.h"
#include "iwl-trans.h"
+#include "iwl-fh.h"
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
@@ -84,42 +84,37 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
priv->ucode_write_complete = 0;
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
(iwl_get_dma_hi_addr(phy_addr)
<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
- iwl_write_direct32(priv,
+ iwl_write_direct32(bus(priv),
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
- if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the %s uCode section due "
- "to interrupt\n", name);
- return ret;
- }
+ ret = wait_event_timeout(priv->shrd->wait_command_queue,
+ priv->ucode_write_complete, 5 * HZ);
if (!ret) {
IWL_ERR(priv, "Could not load the %s uCode section\n",
name);
@@ -163,7 +158,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_cmd cmd;
__le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
@@ -177,6 +172,42 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
(u8 *)&cmd, sizeof(cmd));
}
+static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
+{
+ struct iwl_calib_temperature_offset_v2_cmd cmd;
+ __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
+ EEPROM_KELVIN_TEMPERATURE);
+ __le16 *offset_calib_low =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
+ struct iwl_eeprom_calib_hdr *hdr;
+
+ memset(&cmd, 0, sizeof(cmd));
+ iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ EEPROM_CALIB_ALL);
+ memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
+ sizeof(*offset_calib_high));
+ memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
+ sizeof(*offset_calib_low));
+ if (!(cmd.radio_sensor_offset_low)) {
+ IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
+ cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
+ cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
+ }
+ memcpy(&cmd.burntVoltageRef, &hdr->voltage,
+ sizeof(hdr->voltage));
+
+ IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
+ le16_to_cpu(cmd.radio_sensor_offset_high));
+ IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
+ le16_to_cpu(cmd.radio_sensor_offset_low));
+ IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
+ le16_to_cpu(cmd.burntVoltageRef));
+
+ return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
+ (u8 *)&cmd, sizeof(cmd));
+}
+
static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
{
struct iwl_calib_cfg_cmd calib_cfg_cmd;
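
The new v2 temperature offset calibration above falls back to a default when the EEPROM carries no low offset. Below is a standalone sketch of that fallback; the default value and field widths are placeholders chosen for illustration, not the driver's real constants.

#include <stdio.h>
#include <stdint.h>

/* Assumed placeholder, not necessarily the driver's real default. */
#define SKETCH_DEFAULT_RADIO_SENSOR_OFFSET 0x0a8c

struct temp_offsets {
	uint16_t low;	/* EEPROM_RAW_TEMPERATURE    */
	uint16_t high;	/* EEPROM_KELVIN_TEMPERATURE */
};

/* Mirror of the fallback above: a zero low offset means the EEPROM has
 * no calibration data, so both offsets are replaced by the default. */
static void fixup_temp_offsets(struct temp_offsets *t)
{
	if (!t->low) {
		t->low = SKETCH_DEFAULT_RADIO_SENSOR_OFFSET;
		t->high = SKETCH_DEFAULT_RADIO_SENSOR_OFFSET;
	}
}

int main(void)
{
	struct temp_offsets blank = { 0, 0 };
	struct temp_offsets good = { 0x1234, 0x1250 };

	fixup_temp_offsets(&blank);
	fixup_temp_offsets(&good);
	printf("blank EEPROM -> low 0x%04x high 0x%04x\n",
	       (unsigned)blank.low, (unsigned)blank.high);
	printf("valid EEPROM -> low 0x%04x high 0x%04x\n",
	       (unsigned)good.low, (unsigned)good.high);
	return 0;
}
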
@@ -193,11 +224,12 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
calib_cfg_cmd.ucd_calib_cfg.flags =
IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
-void iwlagn_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+int iwlagn_rx_calib_result(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
@@ -230,9 +262,10 @@ void iwlagn_rx_calib_result(struct iwl_priv *priv,
default:
IWL_ERR(priv, "Unknown calibration notification %d\n",
hdr->op_code);
- return;
+ return -1;
}
iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
+ return 0;
}
int iwlagn_init_alive_start(struct iwl_priv *priv)
@@ -262,8 +295,12 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
* temperature offset calibration is only needed for runtime ucode,
* so prepare the value now.
*/
- if (priv->cfg->need_temp_offset_calib)
- return iwlagn_set_temperature_offset_calib(priv);
+ if (priv->cfg->need_temp_offset_calib) {
+ if (priv->cfg->temp_offset_v2)
+ return iwlagn_set_temperature_offset_calib_v2(priv);
+ else
+ return iwlagn_set_temperature_offset_calib(priv);
+ }
return 0;
}
@@ -291,7 +328,7 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
/* coexistence is disabled */
memset(&coex_cmd, 0, sizeof(coex_cmd));
}
- return trans_send_cmd_pdu(&priv->trans,
+ return iwl_trans_send_cmd_pdu(trans(priv),
COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
sizeof(coex_cmd), &coex_cmd);
}
@@ -324,7 +361,7 @@ void iwlagn_send_prio_tbl(struct iwl_priv *priv)
memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
sizeof(iwlagn_bt_prio_tbl));
- if (trans_send_cmd_pdu(&priv->trans,
+ if (iwl_trans_send_cmd_pdu(trans(priv),
REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
IWL_ERR(priv, "failed to send BT prio tbl command\n");
@@ -337,7 +374,7 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
env_cmd.action = action;
env_cmd.type = type;
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
sizeof(env_cmd), &env_cmd);
if (ret)
@@ -348,9 +385,21 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
static int iwlagn_alive_notify(struct iwl_priv *priv)
{
+ struct iwl_rxon_context *ctx;
int ret;
- trans_tx_start(&priv->trans);
+ if (!priv->tx_cmd_pool)
+ priv->tx_cmd_pool =
+ kmem_cache_create("iwlagn_dev_cmd",
+ sizeof(struct iwl_device_cmd),
+ sizeof(void *), 0, NULL);
+
+ if (!priv->tx_cmd_pool)
+ return -ENOMEM;
+
+ iwl_trans_tx_start(trans(priv));
+ for_each_context(priv, ctx)
+ ctx->last_tx_rejected = false;
ret = iwlagn_send_wimax_coex(priv);
if (ret)
@@ -369,7 +418,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
*/
-static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
+static int iwl_verify_inst_sparse(struct iwl_priv *priv,
struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
@@ -383,9 +432,9 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
i + IWLAGN_RTC_INST_LOWER_BOUND);
- val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image))
return -EIO;
}
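
As the comment above the renamed iwl_verify_inst_sparse() says, the image is spot-checked by sampling one 32-bit word every 100 bytes rather than comparing the whole section. A standalone sketch of the same sampling loop over two plain buffers:

#include <stdio.h>
#include <stdint.h>

/* Compare one 32-bit word every 100 bytes; if every sampled word matches,
 * assume the stretches in between are fine too. */
static int verify_sparse(const uint32_t *image, const uint32_t *loaded,
			 size_t len_bytes)
{
	size_t i;

	for (i = 0; i < len_bytes; i += 100) {
		size_t word = i / sizeof(uint32_t);

		if (image[word] != loaded[word])
			return -1;	/* mismatch at byte offset i */
	}
	return 0;
}

int main(void)
{
	uint32_t image[256], loaded[256];
	size_t n;

	for (n = 0; n < 256; n++)
		image[n] = loaded[n] = (uint32_t)n * 0x01010101u;

	printf("clean copy:   %d\n", verify_sparse(image, loaded, sizeof(image)));
	loaded[50] ^= 0xdeadbeefu;	/* corrupt a word that gets sampled */
	printf("corrupt copy: %d\n", verify_sparse(image, loaded, sizeof(image)));
	return 0;
}
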
@@ -404,14 +453,14 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
IWLAGN_RTC_INST_LOWER_BOUND);
for (offs = 0;
offs < len && errors < 20;
offs += sizeof(u32), image++) {
/* read data comes through single port, auto-incr addr */
- val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "uCode INST section at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -427,7 +476,7 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
*/
static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
{
- if (!iwlcore_verify_inst_sparse(priv, &img->code)) {
+ if (!iwl_verify_inst_sparse(priv, &img->code)) {
IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
return 0;
}
@@ -478,7 +527,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
int ret;
enum iwlagn_ucode_type old_type;
- ret = trans_start_device(&priv->trans);
+ ret = iwl_trans_start_device(trans(priv));
if (ret)
return ret;
@@ -495,7 +544,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
return ret;
}
- trans_kick_nic(&priv->trans);
+ iwl_trans_kick_nic(trans(priv));
/*
* Some things may run in the background now, but we
@@ -545,7 +594,7 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
struct iwl_notification_wait calib_wait;
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* No init ucode required? Curious, but maybe ok */
if (!priv->ucode_init.code.len)
@@ -580,6 +629,6 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
iwlagn_remove_notification(priv, &calib_wait);
out:
/* Whatever happened, stop the device */
- trans_stop_device(&priv->trans);
+ iwl_trans_stop_device(trans(priv));
return ret;
}
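
Earlier in this file, iwlagn_alive_notify() now creates the "iwlagn_dev_cmd" cache on first use and fails with -ENOMEM if that allocation does not succeed, so a later alive notification can reuse the existing pool. A rough userspace analogue of that create-once pattern (malloc-backed here, since kmem_cache is kernel-only):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Stand-in for the TX command objects handed to the transport layer. */
struct dev_cmd {
	unsigned char payload[64];
};

/* Userspace stand-in for a kmem_cache: just remembers the object size. */
struct obj_cache {
	size_t size;
};

static struct obj_cache *tx_cmd_pool;	/* created once, on "alive" */

static int alive_notify(void)
{
	/* Create the pool only if it does not exist yet; later calls
	 * reuse the one already allocated. */
	if (!tx_cmd_pool) {
		tx_cmd_pool = malloc(sizeof(*tx_cmd_pool));
		if (tx_cmd_pool)
			tx_cmd_pool->size = sizeof(struct dev_cmd);
	}
	if (!tx_cmd_pool)
		return -ENOMEM;
	return 0;
}

int main(void)
{
	printf("first alive:  %d\n", alive_notify());
	printf("second alive: %d\n", alive_notify());	/* pool reused */
	free(tx_cmd_pool);
	return 0;
}
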
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index f9c3cd95d61..ccba69b7f8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -35,7 +35,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
@@ -48,10 +47,9 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-sta.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
+#include "iwl-shared.h"
#include "iwl-bus.h"
#include "iwl-trans.h"
@@ -79,9 +77,7 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
-
-static int iwlagn_ant_coupling;
-static bool iwlagn_bt_ch_announce = 1;
+MODULE_ALIAS("iwlagn");
void iwl_update_chain_flags(struct iwl_priv *priv)
{
@@ -138,7 +134,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
* beacon contents.
*/
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
if (!priv->beacon_ctx) {
IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
@@ -183,7 +179,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
rate = info->control.rates[0].idx;
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
+ hw_params(priv).valid_tx_ant);
rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* In mac80211, rates for 5 GHz start at 0 */
@@ -203,7 +199,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
cmd.data[1] = priv->beacon_skb->data;
cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
static void iwl_bg_beacon_update(struct work_struct *work)
@@ -212,7 +208,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
container_of(work, struct iwl_priv, beacon_update);
struct sk_buff *beacon;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (!priv->beacon_ctx) {
IWL_ERR(priv, "updating beacon w/o beacon context!\n");
goto out;
@@ -242,7 +238,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
iwlagn_send_beacon_cmd(priv);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
static void iwl_bg_bt_runtime_config(struct work_struct *work)
@@ -250,11 +246,11 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
struct iwl_priv *priv =
container_of(work, struct iwl_priv, bt_runtime_config);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return;
iwlagn_send_advance_bt_config(priv);
}
@@ -265,13 +261,13 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
container_of(work, struct iwl_priv, bt_full_concurrency);
struct iwl_rxon_context *ctx;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
goto out;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
goto out;
IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
@@ -289,7 +285,7 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
iwlagn_send_advance_bt_config(priv);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
/**
@@ -306,11 +302,11 @@ static void iwl_bg_statistics_periodic(unsigned long data)
{
struct iwl_priv *priv = (struct iwl_priv *)data;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return;
iwl_send_statistics_request(priv, CMD_ASYNC, false);
@@ -332,14 +328,14 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
/* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (iwl_grab_nic_access(priv)) {
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ spin_lock_irqsave(&bus(priv)->reg_lock, reg_flags);
+ if (iwl_grab_nic_access(bus(priv))) {
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
return;
}
/* Set starting address; reads will auto-increment */
- iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, ptr);
rmb();
/*
@@ -347,20 +343,20 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
* place event id # at far right for easier visual parsing.
*/
for (i = 0; i < num_events; i++) {
- ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
- time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ ev = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ time = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
if (mode == 0) {
trace_iwlwifi_dev_ucode_cont_event(priv,
0, time, ev);
} else {
- data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ data = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
trace_iwlwifi_dev_ucode_cont_event(priv,
time, data, ev);
}
}
/* Allow device to power down */
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ iwl_release_nic_access(bus(priv));
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
}
static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -373,10 +369,12 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
base = priv->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
- capacity = iwl_read_targ_mem(priv, base);
- num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
- mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+ capacity = iwl_read_targ_mem(bus(priv), base);
+ num_wraps = iwl_read_targ_mem(bus(priv),
+ base + (2 * sizeof(u32)));
+ mode = iwl_read_targ_mem(bus(priv), base + (1 * sizeof(u32)));
+ next_entry = iwl_read_targ_mem(bus(priv),
+ base + (3 * sizeof(u32)));
} else
return;
@@ -427,7 +425,7 @@ static void iwl_bg_ucode_trace(unsigned long data)
{
struct iwl_priv *priv = (struct iwl_priv *)data;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
if (priv->event_log.ucode_trace) {
@@ -443,131 +441,17 @@ static void iwl_bg_tx_flush(struct work_struct *work)
struct iwl_priv *priv =
container_of(work, struct iwl_priv, tx_flush);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
/* do nothing if rf-kill is on */
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return;
IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
}
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
-static ssize_t show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
-}
-static ssize_t store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
- else {
- priv->debug_level = val;
- if (iwl_alloc_traffic_mem(priv))
- IWL_ERR(priv,
- "Not enough memory to generate traffic log\n");
- }
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- show_debug_level, store_debug_level);
-
-
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
-
-static ssize_t show_temperature(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", priv->temperature);
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
-
-static ssize_t show_tx_power(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_is_ready_rf(priv))
- return sprintf(buf, "off\n");
- else
- return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t store_tx_power(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- IWL_INFO(priv, "%s is not in decimal form.\n", buf);
- else {
- ret = iwl_set_tx_power(priv, val, false);
- if (ret)
- IWL_ERR(priv, "failed setting tx power (0x%d).\n",
- ret);
- else
- ret = count;
- }
- return ret;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
-
-static struct attribute *iwl_sysfs_entries[] = {
- &dev_attr_temperature.attr,
- &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_DEBUG
- &dev_attr_debug_level.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwl_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwl_sysfs_entries,
-};
-
/******************************************************************************
*
* uCode download functions
@@ -577,7 +461,7 @@ static struct attribute_group iwl_attribute_group = {
static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
{
if (desc->v_addr)
- dma_free_coherent(priv->bus->dev, desc->len,
+ dma_free_coherent(bus(priv)->dev, desc->len,
desc->v_addr, desc->p_addr);
desc->v_addr = NULL;
desc->len = 0;
@@ -604,7 +488,7 @@ static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
return -EINVAL;
}
- desc->v_addr = dma_alloc_coherent(priv->bus->dev, len,
+ desc->v_addr = dma_alloc_coherent(bus(priv)->dev, len,
&desc->p_addr, GFP_KERNEL);
if (!desc->v_addr)
return -ENOMEM;
@@ -614,6 +498,64 @@ static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
return 0;
}
+static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
+{
+ int i;
+
+ /*
+ * The default context is always valid,
+ * the PAN context depends on uCode.
+ */
+ priv->shrd->valid_contexts = BIT(IWL_RXON_CTX_BSS);
+ if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
+ priv->shrd->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
+
+ for (i = 0; i < NUM_IWL_RXON_CTX; i++)
+ priv->contexts[i].ctxid = i;
+
+ priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
+ priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
+ priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
+ priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
+ priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
+ priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
+ BIT(NL80211_IFTYPE_ADHOC);
+ priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
+ BIT(NL80211_IFTYPE_STATION);
+ priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
+ priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
+ priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
+ priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+
+ priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
+ priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
+ REPLY_WIPAN_RXON_TIMING;
+ priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
+ REPLY_WIPAN_RXON_ASSOC;
+ priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
+ priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
+ priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
+ priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
+ priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
+ priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
+
+ if (ucode_flags & IWL_UCODE_TLV_FLAGS_P2P)
+ priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
+ priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
+ priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
+ priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
+
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+}
+
+
struct iwlagn_ucode_capabilities {
u32 max_probe_length;
u32 standard_phy_calibration_size;
@@ -621,7 +563,7 @@ struct iwlagn_ucode_capabilities {
};
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-static int iwl_mac_setup_register(struct iwl_priv *priv,
+static int iwlagn_mac_setup_register(struct iwl_priv *priv,
struct iwlagn_ucode_capabilities *capa);
#define UCODE_EXPERIMENTAL_INDEX 100
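
The new iwl_init_context() above derives the set of usable RXON contexts from the uCode capability flags: the BSS context is always valid, the PAN context only when the firmware advertises PAN support. A minimal sketch of that bitmask computation, with the flag and context values invented for illustration:

#include <stdio.h>

/* Illustrative values only -- the real flag and context IDs live in the
 * driver headers. */
#define SKETCH_UCODE_FLAG_PAN	(1u << 0)
#define SKETCH_CTX_BSS		0
#define SKETCH_CTX_PAN		1
#define BIT(n)			(1u << (n))

static unsigned int valid_contexts(unsigned int ucode_flags)
{
	unsigned int valid = BIT(SKETCH_CTX_BSS);	/* always usable */

	if (ucode_flags & SKETCH_UCODE_FLAG_PAN)
		valid |= BIT(SKETCH_CTX_PAN);		/* PAN needs uCode support */
	return valid;
}

int main(void)
{
	printf("no PAN uCode: 0x%x\n", valid_contexts(0));
	printf("PAN uCode:    0x%x\n", valid_contexts(SKETCH_UCODE_FLAG_PAN));
	return 0;
}
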
@@ -658,7 +600,7 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
priv->firmware_name);
return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
- priv->bus->dev,
+ bus(priv)->dev,
GFP_KERNEL, priv, iwl_ucode_callback);
}
@@ -738,8 +680,6 @@ static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
return 0;
}
-static int iwlagn_wanted_ucode_alternative = 1;
-
static int iwlagn_load_firmware(struct iwl_priv *priv,
const struct firmware *ucode_raw,
struct iwlagn_firmware_pieces *pieces,
@@ -749,7 +689,8 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
struct iwl_ucode_tlv *tlv;
size_t len = ucode_raw->size;
const u8 *data;
- int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp;
+ int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
+ int tmp;
u64 alternatives;
u32 tlv_len;
enum iwl_ucode_tlv_type tlv_type;
@@ -952,6 +893,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
int err;
struct iwlagn_firmware_pieces pieces;
const unsigned int api_max = priv->cfg->ucode_api_max;
+ unsigned int api_ok = priv->cfg->ucode_api_ok;
const unsigned int api_min = priv->cfg->ucode_api_min;
u32 api_ver;
char buildstr[25];
@@ -962,10 +904,13 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE,
};
+ if (!api_ok)
+ api_ok = api_max;
+
memset(&pieces, 0, sizeof(pieces));
if (!ucode_raw) {
- if (priv->fw_index <= priv->cfg->ucode_api_max)
+ if (priv->fw_index <= api_ok)
IWL_ERR(priv,
"request for firmware file '%s' failed.\n",
priv->firmware_name);
@@ -1011,12 +956,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
goto try_again;
}
- if (api_ver != api_max)
- IWL_ERR(priv,
- "Firmware has old API version. Expected v%u, "
- "got v%u. New firmware can be obtained "
- "from http://www.intellinuxwireless.org.\n",
- api_max, api_ver);
+ if (api_ver < api_ok) {
+ if (api_ok != api_max)
+ IWL_ERR(priv, "Firmware has old API version, "
+ "expected v%u through v%u, got v%u.\n",
+ api_ok, api_max, api_ver);
+ else
+ IWL_ERR(priv, "Firmware has old API version, "
+ "expected v%u, got v%u.\n",
+ api_max, api_ver);
+ IWL_ERR(priv, "New firmware can be obtained from "
+ "http://www.intellinuxwireless.org/.\n");
+ }
}
if (build)
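
The version check above distinguishes three levels: api_min, api_ok (below which the driver still loads but warns), and api_max, with api_ok defaulting to api_max when the config leaves it unset. A standalone sketch of that acceptance logic, reduced to return codes; the outright rejection outside [api_min, api_max] is assumed to happen earlier in the callback and is only modelled here:

#include <stdio.h>

/* 0: fine, 1: usable but old (warn), -1: rejected.  Thresholds below are
 * illustrative; the real ones come from the per-device config. */
static int check_api_version(unsigned int api_min, unsigned int api_ok,
			     unsigned int api_max, unsigned int api_ver)
{
	if (!api_ok)			/* no explicit "ok" level: use max */
		api_ok = api_max;

	if (api_ver < api_min || api_ver > api_max)
		return -1;		/* rejected, try an older image */
	if (api_ver < api_ok)
		return 1;		/* loads, but prints the old-API warning */
	return 0;
}

int main(void)
{
	printf("v4 with min=4 ok=5 max=6 -> %d\n", check_api_version(4, 5, 6, 4));
	printf("v5 with min=4 ok=5 max=6 -> %d\n", check_api_version(4, 5, 6, 5));
	printf("v3 with min=4 ok=5 max=6 -> %d\n", check_api_version(4, 5, 6, 3));
	return 0;
}
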
@@ -1060,25 +1011,25 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
pieces.init_data_size);
/* Verify that uCode images will fit in card's SRAM */
- if (pieces.inst_size > priv->hw_params.max_inst_size) {
+ if (pieces.inst_size > hw_params(priv).max_inst_size) {
IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
pieces.inst_size);
goto try_again;
}
- if (pieces.data_size > priv->hw_params.max_data_size) {
+ if (pieces.data_size > hw_params(priv).max_data_size) {
IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
pieces.data_size);
goto try_again;
}
- if (pieces.init_size > priv->hw_params.max_inst_size) {
+ if (pieces.init_size > hw_params(priv).max_inst_size) {
IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
pieces.init_size);
goto try_again;
}
- if (pieces.init_data_size > priv->hw_params.max_data_size) {
+ if (pieces.init_data_size > hw_params(priv).max_data_size) {
IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
pieces.init_data_size);
goto try_again;
@@ -1143,17 +1094,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->new_scan_threshold_behaviour =
!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
- if ((priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE) &&
- (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN)) {
- priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
+ if (!(priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
+ ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+
+ /*
+ * if not PAN, then don't support P2P -- might be a uCode
+ * packaging bug or due to the eeprom check above
+ */
+ if (!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN))
+ ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
+
+ if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) {
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
- } else
+ priv->shrd->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+ } else {
priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
- if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
- priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
- else
- priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+ priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+ }
/*
* figure out the offset of chain noise reset and gain commands
@@ -1169,12 +1126,15 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->phy_calib_chain_noise_gain_cmd =
ucode_capa.standard_phy_calibration_size + 1;
+ /* initialize all valid contexts */
+ iwl_init_context(priv, ucode_capa.flags);
+
/**************************************************
* This is still part of probe() in a sense...
*
* 9. Setup and register with mac80211 and debugfs
**************************************************/
- err = iwl_mac_setup_register(priv, &ucode_capa);
+ err = iwlagn_mac_setup_register(priv, &ucode_capa);
if (err)
goto out_unbind;
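
The capability fixups above first strip the PAN flag when the EEPROM SKU lacks IPAN, then strip P2P whenever PAN is gone, and finally pick the command queue and station key limit from the resulting PAN bit. A compact sketch of that gating order; flag bits, queue numbers and key limits below are placeholders:

#include <stdio.h>

/* Placeholder values -- only the gating order matters here. */
#define F_PAN		(1u << 0)
#define F_P2P		(1u << 1)
#define CMDQ_DEFAULT	4
#define CMDQ_IPAN	9
#define KEYS_DEFAULT	8
#define KEYS_PAN	16

struct caps {
	unsigned int flags;
	int cmd_queue;
	int sta_key_max;
};

static void fixup_caps(struct caps *c, int eeprom_has_ipan)
{
	if (!eeprom_has_ipan)
		c->flags &= ~F_PAN;	/* hardware can't do PAN */
	if (!(c->flags & F_PAN))
		c->flags &= ~F_P2P;	/* P2P rides on top of PAN */

	if (c->flags & F_PAN) {
		c->sta_key_max = KEYS_PAN;
		c->cmd_queue = CMDQ_IPAN;
	} else {
		c->sta_key_max = KEYS_DEFAULT;
		c->cmd_queue = CMDQ_DEFAULT;
	}
}

int main(void)
{
	struct caps c = { .flags = F_PAN | F_P2P };

	fixup_caps(&c, 0);	/* uCode says PAN+P2P, EEPROM says no IPAN */
	printf("flags 0x%x cmd_queue %d keys %d\n",
	       c.flags, c.cmd_queue, c.sta_key_max);
	return 0;
}
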
@@ -1182,13 +1142,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
if (err)
IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
- err = sysfs_create_group(&(priv->bus->dev->kobj),
- &iwl_attribute_group);
- if (err) {
- IWL_ERR(priv, "failed to create sysfs device attributes\n");
- goto out_unbind;
- }
-
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
complete(&priv->firmware_loading_complete);
@@ -1206,368 +1159,10 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
iwl_dealloc_ucode(priv);
out_unbind:
complete(&priv->firmware_loading_complete);
- device_release_driver(priv->bus->dev);
+ device_release_driver(bus(priv)->dev);
release_firmware(ucode_raw);
}
-static const char * const desc_lookup_text[] = {
- "OK",
- "FAIL",
- "BAD_PARAM",
- "BAD_CHECKSUM",
- "NMI_INTERRUPT_WDG",
- "SYSASSERT",
- "FATAL_ERROR",
- "BAD_COMMAND",
- "HW_ERROR_TUNE_LOCK",
- "HW_ERROR_TEMPERATURE",
- "ILLEGAL_CHAN_FREQ",
- "VCC_NOT_STABLE",
- "FH_ERROR",
- "NMI_INTERRUPT_HOST",
- "NMI_INTERRUPT_ACTION_PT",
- "NMI_INTERRUPT_UNKNOWN",
- "UCODE_VERSION_MISMATCH",
- "HW_ERROR_ABS_LOCK",
- "HW_ERROR_CAL_LOCK_FAIL",
- "NMI_INTERRUPT_INST_ACTION_PT",
- "NMI_INTERRUPT_DATA_ACTION_PT",
- "NMI_TRM_HW_ER",
- "NMI_INTERRUPT_TRM",
- "NMI_INTERRUPT_BREAK_POINT",
- "DEBUG_0",
- "DEBUG_1",
- "DEBUG_2",
- "DEBUG_3",
-};
-
-static struct { char *name; u8 num; } advanced_lookup[] = {
- { "NMI_INTERRUPT_WDG", 0x34 },
- { "SYSASSERT", 0x35 },
- { "UCODE_VERSION_MISMATCH", 0x37 },
- { "BAD_COMMAND", 0x38 },
- { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
- { "FATAL_ERROR", 0x3D },
- { "NMI_TRM_HW_ERR", 0x46 },
- { "NMI_INTERRUPT_TRM", 0x4C },
- { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
- { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
- { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
- { "NMI_INTERRUPT_HOST", 0x66 },
- { "NMI_INTERRUPT_ACTION_PT", 0x7C },
- { "NMI_INTERRUPT_UNKNOWN", 0x84 },
- { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
- { "ADVANCED_SYSASSERT", 0 },
-};
-
-static const char *desc_lookup(u32 num)
-{
- int i;
- int max = ARRAY_SIZE(desc_lookup_text);
-
- if (num < max)
- return desc_lookup_text[num];
-
- max = ARRAY_SIZE(advanced_lookup) - 1;
- for (i = 0; i < max; i++) {
- if (advanced_lookup[i].num == num)
- break;
- }
- return advanced_lookup[i].name;
-}
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-void iwl_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 base;
- struct iwl_error_event_table table;
-
- base = priv->device_pointers.error_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
- if (!base)
- base = priv->init_errlog_ptr;
- } else {
- if (!base)
- base = priv->inst_errlog_ptr;
- }
-
- if (!iwlagn_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base,
- (priv->ucode_type == IWL_UCODE_INIT)
- ? "Init" : "RT");
- return;
- }
-
- iwl_read_targ_mem_words(priv, base, &table, sizeof(table));
-
- if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, table.valid);
- }
-
- priv->isr_stats.err_code = table.error_id;
-
- trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
- table.data1, table.data2, table.line,
- table.blink1, table.blink2, table.ilink1,
- table.ilink2, table.bcon_time, table.gp1,
- table.gp2, table.gp3, table.ucode_ver,
- table.hw_ver, table.brd_ver);
- IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
- desc_lookup(table.error_id));
- IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
- IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
- IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
- IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
- IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
- IWL_ERR(priv, "0x%08X | data1\n", table.data1);
- IWL_ERR(priv, "0x%08X | data2\n", table.data2);
- IWL_ERR(priv, "0x%08X | line\n", table.line);
- IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
- IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
- IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
- IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
- IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
- IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
- IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
- IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
- IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
- IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
-}
-
-#define EVENT_START_OFFSET (4 * sizeof(u32))
-
-/**
- * iwl_print_event_log - Dump error event log to syslog
- *
- */
-static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode,
- int pos, char **buf, size_t bufsz)
-{
- u32 i;
- u32 base; /* SRAM byte address of event log header */
- u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
- u32 ptr; /* SRAM byte address of log data */
- u32 ev, time, data; /* event log data */
- unsigned long reg_flags;
-
- if (num_events == 0)
- return pos;
-
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
- if (!base)
- base = priv->init_evtlog_ptr;
- } else {
- if (!base)
- base = priv->inst_evtlog_ptr;
- }
-
- if (mode == 0)
- event_size = 2 * sizeof(u32);
- else
- event_size = 3 * sizeof(u32);
-
- ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
-
- /* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
-
- /* Set starting address; reads will auto-increment */
- iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
- rmb();
-
- /* "time" is actually "data" for mode 0 (no timestamp).
- * place event id # at far right for easier visual parsing. */
- for (i = 0; i < num_events; i++) {
- ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
- time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
- if (mode == 0) {
- /* data, ev */
- if (bufsz) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- "EVT_LOG:0x%08x:%04u\n",
- time, ev);
- } else {
- trace_iwlwifi_dev_ucode_event(priv, 0,
- time, ev);
- IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
- time, ev);
- }
- } else {
- data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
- if (bufsz) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- "EVT_LOGT:%010u:0x%08x:%04u\n",
- time, data, ev);
- } else {
- IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
- time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time,
- data, ev);
- }
- }
- }
-
- /* Allow device to power down */
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return pos;
-}
-
-/**
- * iwl_print_last_event_logs - Dump the newest # of event log to syslog
- */
-static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
- u32 num_wraps, u32 next_entry,
- u32 size, u32 mode,
- int pos, char **buf, size_t bufsz)
-{
- /*
- * display the newest DEFAULT_LOG_ENTRIES entries
- * i.e the entries just before the next ont that uCode would fill.
- */
- if (num_wraps) {
- if (next_entry < size) {
- pos = iwl_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode,
- pos, buf, bufsz);
- pos = iwl_print_event_log(priv, 0,
- next_entry, mode,
- pos, buf, bufsz);
- } else
- pos = iwl_print_event_log(priv, next_entry - size,
- size, mode, pos, buf, bufsz);
- } else {
- if (next_entry < size) {
- pos = iwl_print_event_log(priv, 0, next_entry,
- mode, pos, buf, bufsz);
- } else {
- pos = iwl_print_event_log(priv, next_entry - size,
- size, mode, pos, buf, bufsz);
- }
- }
- return pos;
-}
-
-#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
-
-int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
- char **buf, bool display)
-{
- u32 base; /* SRAM byte address of event log header */
- u32 capacity; /* event log capacity in # entries */
- u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
- u32 num_wraps; /* # times uCode wrapped to top of log */
- u32 next_entry; /* index of next entry to be written by uCode */
- u32 size; /* # entries that we'll print */
- u32 logsize;
- int pos = 0;
- size_t bufsz = 0;
-
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
- logsize = priv->init_evtlog_size;
- if (!base)
- base = priv->init_evtlog_ptr;
- } else {
- logsize = priv->inst_evtlog_size;
- if (!base)
- base = priv->inst_evtlog_ptr;
- }
-
- if (!iwlagn_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv,
- "Invalid event log pointer 0x%08X for %s uCode\n",
- base,
- (priv->ucode_type == IWL_UCODE_INIT)
- ? "Init" : "RT");
- return -EINVAL;
- }
-
- /* event log header */
- capacity = iwl_read_targ_mem(priv, base);
- mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
- num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
-
- if (capacity > logsize) {
- IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
- capacity, logsize);
- capacity = logsize;
- }
-
- if (next_entry > logsize) {
- IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
- next_entry, logsize);
- next_entry = logsize;
- }
-
- size = num_wraps ? capacity : next_entry;
-
- /* bail out if nothing in log */
- if (size == 0) {
- IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return pos;
- }
-
- /* enable/disable bt channel inhibition */
- priv->bt_ch_announce = iwlagn_bt_ch_announce;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
- size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
- ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#else
- size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
- ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#endif
- IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
- size);
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (display) {
- if (full_log)
- bufsz = capacity * 48;
- else
- bufsz = size * 48;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- }
- if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
- /*
- * if uCode has wrapped back to top of log,
- * start at the oldest entry,
- * i.e the next one that uCode would fill.
- */
- if (num_wraps)
- pos = iwl_print_event_log(priv, next_entry,
- capacity - next_entry, mode,
- pos, buf, bufsz);
- /* (then/else) start at top of log */
- pos = iwl_print_event_log(priv, 0,
- next_entry, mode, pos, buf, bufsz);
- } else
- pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode,
- pos, buf, bufsz);
-#else
- pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode,
- pos, buf, bufsz);
-#endif
- return pos;
-}
-
static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
{
struct iwl_ct_kill_config cmd;
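
The removed iwl_print_last_event_logs() above walked a wrap-around event log and printed only the newest 'size' entries. Its index arithmetic is easy to get wrong, so here is a standalone sketch of the same selection over a plain array, with the log layout simplified to one integer per entry:

#include <stdio.h>

/* Print the contiguous range of entry indices [start, start+count). */
static void print_range(const int *log, unsigned int start, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		printf("entry[%u] = %d\n", start + i, log[start + i]);
}

/* Same selection as the removed helper: show the newest 'size' entries,
 * honouring a possible wrap of the circular log. */
static void print_last_events(const int *log, unsigned int capacity,
			      unsigned int num_wraps, unsigned int next_entry,
			      unsigned int size)
{
	if (num_wraps) {
		if (next_entry < size) {
			/* tail of the buffer, then the wrapped head */
			print_range(log, capacity - (size - next_entry),
				    size - next_entry);
			print_range(log, 0, next_entry);
		} else {
			print_range(log, next_entry - size, size);
		}
	} else {
		if (next_entry < size)
			print_range(log, 0, next_entry);
		else
			print_range(log, next_entry - size, size);
	}
}

int main(void)
{
	int log[8] = { 80, 81, 82, 83, 4, 5, 6, 7 };	/* wrapped once at idx 4 */

	/* capacity 8, wrapped, next write at 4, want the newest 6 entries:
	 * expect 6, 7, 80, 81, 82, 83 in write order. */
	print_last_events(log, 8, 1, 4, 6);
	return 0;
}
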
@@ -1575,44 +1170,43 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&priv->lock, flags);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ spin_lock_irqsave(&priv->shrd->lock, flags);
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
priv->thermal_throttle.ct_kill_toggle = false;
if (priv->cfg->base_params->support_ct_kill_exit) {
adv_cmd.critical_temperature_enter =
- cpu_to_le32(priv->hw_params.ct_kill_threshold);
+ cpu_to_le32(hw_params(priv).ct_kill_threshold);
adv_cmd.critical_temperature_exit =
- cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
+ cpu_to_le32(hw_params(priv).ct_kill_exit_threshold);
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_CT_KILL_CONFIG_CMD,
CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
if (ret)
IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
else
IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
- "succeeded, "
- "critical temperature enter is %d,"
- "exit is %d\n",
- priv->hw_params.ct_kill_threshold,
- priv->hw_params.ct_kill_exit_threshold);
+ "succeeded, critical temperature enter is %d,"
+ "exit is %d\n",
+ hw_params(priv).ct_kill_threshold,
+ hw_params(priv).ct_kill_exit_threshold);
} else {
cmd.critical_temperature_R =
- cpu_to_le32(priv->hw_params.ct_kill_threshold);
+ cpu_to_le32(hw_params(priv).ct_kill_threshold);
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_CT_KILL_CONFIG_CMD,
CMD_SYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
else
IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
- "succeeded, "
- "critical temperature is %d\n",
- priv->hw_params.ct_kill_threshold);
+ "succeeded, "
+ "critical temperature is %d\n",
+ hw_params(priv).ct_kill_threshold);
}
}
@@ -1626,10 +1220,10 @@ static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
};
memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
- calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_RT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
@@ -1641,7 +1235,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
if (IWL_UCODE_API(priv->ucode_ver) > 1) {
IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
- return trans_send_cmd_pdu(&priv->trans,
+ return iwl_trans_send_cmd_pdu(trans(priv),
TX_ANT_CONFIGURATION_CMD,
CMD_SYNC,
sizeof(struct iwl_tx_ant_config_cmd),
@@ -1663,17 +1257,17 @@ int iwl_alive_start(struct iwl_priv *priv)
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
/*TODO: this should go to the transport layer */
- iwl_reset_ict(priv);
+ iwl_reset_ict(trans(priv));
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
/* After the ALIVE response, we can send host commands to the uCode */
- set_bit(STATUS_ALIVE, &priv->status);
+ set_bit(STATUS_ALIVE, &priv->shrd->status);
/* Enable watchdog to monitor the driver tx queues */
iwl_setup_watchdog(priv);
- if (iwl_is_rfkill(priv))
+ if (iwl_is_rfkill(priv->shrd))
return -ERFKILL;
/* download priority table before any calibration request */
@@ -1710,8 +1304,9 @@ int iwl_alive_start(struct iwl_priv *priv)
iwl_send_bt_config(priv);
}
- if (priv->hw_params.calib_rt_cfg)
- iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg);
+ if (hw_params(priv).calib_rt_cfg)
+ iwlagn_send_calib_cfg_rt(priv,
+ hw_params(priv).calib_rt_cfg);
ieee80211_wake_queues(priv->hw);
@@ -1720,7 +1315,7 @@ int iwl_alive_start(struct iwl_priv *priv)
/* Configure Tx antenna selection based on H/W config */
iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant);
- if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
+ if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) {
struct iwl_rxon_cmd *active_rxon =
(struct iwl_rxon_cmd *)&ctx->active;
/* apply any changes in staging */
@@ -1735,12 +1330,12 @@ int iwl_alive_start(struct iwl_priv *priv)
iwlagn_set_rxon_chain(priv, ctx);
}
- if (!priv->wowlan) {
+ if (!priv->shrd->wowlan) {
/* WoWLAN ucode will not reply in the same way, skip it */
iwl_reset_run_time_calib(priv);
}
- set_bit(STATUS_READY, &priv->status);
+ set_bit(STATUS_READY, &priv->shrd->status);
/* Configure the adapter for unassociated operation */
ret = iwlagn_commit_rxon(priv, ctx);
@@ -1765,7 +1360,15 @@ static void __iwl_down(struct iwl_priv *priv)
iwl_scan_cancel_timeout(priv, 200);
- exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
+ /*
+ * If active, scanning won't cancel it, so say it expired.
+ * No race since we hold the mutex here and a new one
+ * can't come in at this time.
+ */
+ ieee80211_remain_on_channel_expired(priv->hw);
+
+ exit_pending =
+ test_and_set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
* to prevent rearm timer */
@@ -1790,32 +1393,33 @@ static void __iwl_down(struct iwl_priv *priv)
/* Wipe out the EXIT_PENDING status bit if we are not actually
* exiting the module */
if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
+ clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
if (priv->mac80211_registered)
ieee80211_stop_queues(priv->hw);
+ iwl_trans_stop_device(trans(priv));
+
/* Clear out all status bits but a few that are stable across reset */
- priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+ priv->shrd->status &=
+ test_bit(STATUS_RF_KILL_HW, &priv->shrd->status) <<
STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+ test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) <<
STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->status) <<
+ test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+ test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) <<
STATUS_EXIT_PENDING;
- trans_stop_device(&priv->trans);
-
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = NULL;
}
static void iwl_down(struct iwl_priv *priv)
{
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
__iwl_down(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
iwl_cancel_deferred_work(priv);
}
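
__iwl_down() above keeps only a handful of status bits (RF-kill, geo-configured, firmware error, exit pending) across the shutdown; the expression rebuilds the surviving mask from test_bit() results shifted back into place. A standalone sketch of that idiom, with made-up bit positions rather than the driver's STATUS_* values:

#include <stdio.h>

/* Illustrative bit positions; the driver's STATUS_* values differ. */
enum {
	S_RF_KILL_HW	 = 0,
	S_GEO_CONFIGURED = 1,
	S_ALIVE		 = 2,	/* example of a bit that must NOT survive */
	S_FW_ERROR	 = 3,
	S_EXIT_PENDING	 = 4,
};

static unsigned long test_bit(int nr, unsigned long status)
{
	return (status >> nr) & 1;
}

/* Rebuild the status word from only the bits allowed to survive a
 * shutdown, the same way the &= expression in __iwl_down() does. */
static unsigned long surviving_status(unsigned long status)
{
	return status &
	       (test_bit(S_RF_KILL_HW, status) << S_RF_KILL_HW |
		test_bit(S_GEO_CONFIGURED, status) << S_GEO_CONFIGURED |
		test_bit(S_FW_ERROR, status) << S_FW_ERROR |
		test_bit(S_EXIT_PENDING, status) << S_EXIT_PENDING);
}

int main(void)
{
	unsigned long status = 1ul << S_RF_KILL_HW | 1ul << S_ALIVE |
			       1ul << S_FW_ERROR;

	printf("before 0x%02lx after 0x%02lx\n", status, surviving_status(status));
	return 0;
}
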
@@ -1827,9 +1431,9 @@ static int __iwl_up(struct iwl_priv *priv)
struct iwl_rxon_context *ctx;
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
return -EIO;
}
@@ -1862,9 +1466,9 @@ static int __iwl_up(struct iwl_priv *priv)
return 0;
error:
- set_bit(STATUS_EXIT_PENDING, &priv->status);
+ set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
__iwl_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
+ clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
IWL_ERR(priv, "Unable to initialize device.\n");
return ret;
@@ -1882,11 +1486,11 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv,
run_time_calib_work);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status)) {
- mutex_unlock(&priv->mutex);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
+ test_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ mutex_unlock(&priv->shrd->mutex);
return;
}
@@ -1895,7 +1499,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
iwl_sensitivity_calibration(priv);
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
static void iwlagn_prepare_restart(struct iwl_priv *priv)
@@ -1907,7 +1511,7 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv)
u8 bt_status;
bool bt_is_sco;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
for_each_context(priv, ctx)
ctx->vif = NULL;
@@ -1941,13 +1545,13 @@ static void iwl_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
- mutex_lock(&priv->mutex);
+ if (test_and_clear_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
+ mutex_lock(&priv->shrd->mutex);
iwlagn_prepare_restart(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
iwl_cancel_deferred_work(priv);
ieee80211_restart_hw(priv->hw);
} else {
@@ -1955,94 +1559,6 @@ static void iwl_bg_restart(struct work_struct *data)
}
}
-static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- unsigned int wait)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- /* Not supported if we don't have PAN */
- if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) {
- ret = -EOPNOTSUPP;
- goto free;
- }
-
- /* Not supported on pre-P2P firmware */
- if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
- BIT(NL80211_IFTYPE_P2P_CLIENT))) {
- ret = -EOPNOTSUPP;
- goto free;
- }
-
- mutex_lock(&priv->mutex);
-
- if (!priv->contexts[IWL_RXON_CTX_PAN].is_active) {
- /*
- * If the PAN context is free, use the normal
- * way of doing remain-on-channel offload + TX.
- */
- ret = 1;
- goto out;
- }
-
- /* TODO: queue up if scanning? */
- if (test_bit(STATUS_SCANNING, &priv->status) ||
- priv->offchan_tx_skb) {
- ret = -EBUSY;
- goto out;
- }
-
- /*
- * max_scan_ie_len doesn't include the blank SSID or the header,
- * so need to add that again here.
- */
- if (skb->len > hw->wiphy->max_scan_ie_len + 24 + 2) {
- ret = -ENOBUFS;
- goto out;
- }
-
- priv->offchan_tx_skb = skb;
- priv->offchan_tx_timeout = wait;
- priv->offchan_tx_chan = chan;
-
- ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif,
- IWL_SCAN_OFFCH_TX, chan->band);
- if (ret)
- priv->offchan_tx_skb = NULL;
- out:
- mutex_unlock(&priv->mutex);
- free:
- if (ret < 0)
- kfree_skb(skb);
-
- return ret;
-}
-
-static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- mutex_lock(&priv->mutex);
-
- if (!priv->offchan_tx_skb) {
- ret = -EINVAL;
- goto unlock;
- }
-
- priv->offchan_tx_skb = NULL;
-
- ret = iwl_scan_cancel_timeout(priv, 200);
- if (ret)
- ret = -EIO;
-unlock:
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-
/*****************************************************************************
*
* mac80211 entry point functions
@@ -2124,7 +1640,7 @@ iwlagn_iface_combinations_p2p[] = {
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
*/
-static int iwl_mac_setup_register(struct iwl_priv *priv,
+static int iwlagn_mac_setup_register(struct iwl_priv *priv,
struct iwlagn_ucode_capabilities *capa)
{
int ret;
@@ -2183,7 +1699,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
- if (priv->ucode_wowlan.code.len && device_can_wakeup(priv->bus->dev)) {
+ if (priv->ucode_wowlan.code.len && device_can_wakeup(bus(priv)->dev)) {
hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
@@ -2242,16 +1758,16 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(priv, "enter\n");
/* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
ret = __iwl_up(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
if (ret)
return ret;
IWL_DEBUG_INFO(priv, "Start UP work done.\n");
/* Now we should be done, and the READY bit should be set. */
- if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
+ if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
ret = -EIO;
iwlagn_led_enable(priv);
@@ -2274,17 +1790,17 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
iwl_down(priv);
- flush_workqueue(priv->workqueue);
+ flush_workqueue(priv->shrd->workqueue);
/* User space software may expect getting rfkill changes
* even if interface is down */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+ iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
iwl_enable_rfkill_int(priv);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int iwlagn_send_patterns(struct iwl_priv *priv,
struct cfg80211_wowlan *wowlan)
{
@@ -2322,7 +1838,7 @@ static int iwlagn_send_patterns(struct iwl_priv *priv,
}
cmd.data[0] = pattern_cmd;
- err = trans_send_cmd(&priv->trans, &cmd);
+ err = iwl_trans_send_cmd(trans(priv), &cmd);
kfree(pattern_cmd);
return err;
}
@@ -2337,7 +1853,8 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
if (iwlagn_mod_params.sw_crypto)
return;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
goto out;
@@ -2348,7 +1865,8 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
priv->have_rekey_data = true;
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
}
struct wowlan_key_data {
@@ -2359,7 +1877,7 @@ struct wowlan_key_data {
bool error, use_rsc_tsc, use_tkip;
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
int i;
@@ -2386,7 +1904,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
u16 p1k[IWLAGN_P1K_SIZE];
int ret, i;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
@@ -2491,7 +2009,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
break;
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
@@ -2516,7 +2034,8 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
if (WARN_ON(!wowlan))
return -EINVAL;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
/* Don't attempt WoWLAN when not associated, tear down instead. */
if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
@@ -2545,7 +2064,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
* since the uCode will add 0x10 before using the value.
*/
for (i = 0; i < 8; i++) {
- seq = priv->stations[IWL_AP_ID].tid[i].seq_number;
+ seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
seq -= 0x10;
wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
}
@@ -2577,9 +2096,9 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
memcpy(&rxon, &ctx->active, sizeof(rxon));
- trans_stop_device(&priv->trans);
+ iwl_trans_stop_device(trans(priv));
- priv->wowlan = true;
+ priv->shrd->wowlan = true;
ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
IWL_UCODE_WOWLAN);
@@ -2610,11 +2129,11 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
* constraints. Since we're in the suspend path
* that isn't really a problem though.
*/
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
ieee80211_iter_keys(priv->hw, ctx->vif,
iwlagn_wowlan_program_keys,
&key_data);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (key_data.error) {
ret = -EIO;
goto error;
@@ -2629,13 +2148,13 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
.len[0] = sizeof(*key_data.rsc_tsc),
};
- ret = trans_send_cmd(&priv->trans, &rsc_tsc_cmd);
+ ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
if (ret)
goto error;
}
if (key_data.use_tkip) {
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_WOWLAN_TKIP_PARAMS,
CMD_SYNC, sizeof(tkip_cmd),
&tkip_cmd);
@@ -2651,7 +2170,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
kek_kck_cmd.replay_ctr = priv->replay_ctr;
- ret = trans_send_cmd_pdu(&priv->trans,
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_WOWLAN_KEK_KCK_MATERIAL,
CMD_SYNC, sizeof(kek_kck_cmd),
&kek_kck_cmd);
@@ -2660,7 +2179,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
}
}
- ret = trans_send_cmd_pdu(&priv->trans, REPLY_WOWLAN_WAKEUP_FILTER,
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
CMD_SYNC, sizeof(wakeup_filter_cmd),
&wakeup_filter_cmd);
if (ret)
@@ -2670,21 +2189,23 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
if (ret)
goto error;
- device_set_wakeup_enable(priv->bus->dev, true);
+ device_set_wakeup_enable(bus(priv)->dev, true);
/* Now let the ucode operate on its own */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
goto out;
error:
- priv->wowlan = false;
+ priv->shrd->wowlan = false;
iwlagn_prepare_restart(priv);
ieee80211_restart_hw(priv->hw);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
kfree(key_data.rsc_tsc);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
return ret;
}
@@ -2697,21 +2218,22 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
u32 base, status = 0xffffffff;
int ret = -EIO;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
base = priv->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
- spin_lock_irqsave(&priv->reg_lock, flags);
- ret = iwl_grab_nic_access_silent(priv);
+ spin_lock_irqsave(&bus(priv)->reg_lock, flags);
+ ret = iwl_grab_nic_access_silent(bus(priv));
if (ret == 0) {
- iwl_write32(priv, HBUS_TARG_MEM_RADDR, base);
- status = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(priv);
+ iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
+ status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ iwl_release_nic_access(bus(priv));
}
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (ret == 0) {
@@ -2722,7 +2244,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
if (priv->wowlan_sram)
_iwl_read_targ_mem_words(
- priv, 0x800000, priv->wowlan_sram,
+ bus(priv), 0x800000, priv->wowlan_sram,
priv->ucode_wowlan.data.len / 4);
}
#endif
@@ -2731,9 +2253,9 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
/* we'll clear ctx->vif during iwlagn_prepare_restart() */
vif = ctx->vif;
- priv->wowlan = false;
+ priv->shrd->wowlan = false;
- device_set_wakeup_enable(priv->bus->dev, false);
+ device_set_wakeup_enable(bus(priv)->dev, false);
iwlagn_prepare_restart(priv);
@@ -2741,7 +2263,8 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
ieee80211_resume_disconnect(vif);
@@ -2810,7 +2333,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
return 0;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_scan_cancel_timeout(priv, 100);
BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
@@ -2861,7 +2384,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -2876,6 +2399,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct iwl_priv *priv = hw->priv;
int ret = -EINVAL;
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
@@ -2883,7 +2407,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
return -EACCES;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -2893,17 +2418,12 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
IWL_DEBUG_HT(priv, "stop Rx\n");
ret = iwl_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
ret = 0;
break;
case IEEE80211_AMPDU_TX_START:
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
- if (ret == 0) {
- priv->agg_tids_count++;
- IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
- priv->agg_tids_count);
- }
break;
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
@@ -2913,9 +2433,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
priv->agg_tids_count);
}
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
ret = 0;
- if (priv->cfg->ht_params &&
+ if (!priv->agg_tids_count && priv->cfg->ht_params &&
priv->cfg->ht_params->use_rts_for_aggregation) {
/*
* switch off RTS/CTS if it was previously enabled
@@ -2929,8 +2449,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_OPERATIONAL:
buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
- trans_txq_agg_setup(&priv->trans, iwl_sta_id(sta), tid,
- buf_size);
+ iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta),
+ tid, buf_size);
/*
* If the limit is 0, then it wasn't initialised yet,
@@ -2962,6 +2482,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
sta_priv->lq_sta.lq.general_params.flags |=
LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
}
+ priv->agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+ priv->agg_tids_count);
sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
sta_priv->max_agg_bufsize;
@@ -2974,8 +2497,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
ret = 0;
break;
}
- mutex_unlock(&priv->mutex);
-
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
}
@@ -2987,15 +2510,15 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret;
+ int ret = 0;
u8 sta_id;
- IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+ IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
sta->addr);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
sta->addr);
- sta_priv->common.sta_id = IWL_INVALID_STATION;
+ sta_priv->sta_id = IWL_INVALID_STATION;
atomic_set(&sta_priv->pending_frames, 0);
if (vif->type == NL80211_IFTYPE_AP)
@@ -3007,19 +2530,20 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
IWL_ERR(priv, "Unable to add station %pM (%d)\n",
sta->addr, ret);
/* Should we return success if return code is EEXIST ? */
- mutex_unlock(&priv->mutex);
- return ret;
+ goto out;
}
- sta_priv->common.sta_id = sta_id;
+ sta_priv->sta_id = sta_id;
/* Initialize rate scaling */
IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
sta->addr);
iwl_rs_rate_init(priv, sta, sta_id);
- mutex_unlock(&priv->mutex);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
+ return ret;
}
static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
@@ -3043,14 +2567,14 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (iwl_is_rfkill(priv))
+ if (iwl_is_rfkill(priv->shrd))
goto out;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
+ test_bit(STATUS_SCANNING, &priv->shrd->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
goto out;
if (!iwl_is_associated_ctx(ctx))
@@ -3069,7 +2593,7 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
goto out;
}
- spin_lock_irq(&priv->lock);
+ spin_lock_irq(&priv->shrd->lock);
priv->current_ht_config.smps = conf->smps_mode;
@@ -3099,23 +2623,23 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
iwl_set_rxon_ht(priv, ht_conf);
iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
- spin_unlock_irq(&priv->lock);
+ spin_unlock_irq(&priv->shrd->lock);
iwl_set_rate(priv);
/*
* at this point, staging_rxon has the
* configuration for channel switch
*/
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
priv->switch_channel = cpu_to_le16(ch);
if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
priv->switch_channel = 0;
ieee80211_chswitch_done(ctx->vif, false);
}
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -3145,7 +2669,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
#undef CHK
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
for_each_context(priv, ctx) {
ctx->staging.filter_flags &= ~filter_nand;
@@ -3157,7 +2681,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
*/
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
/*
* Receiving all multicast frames is always enabled by the
@@ -3173,14 +2697,14 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
{
struct iwl_priv *priv = hw->priv;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "enter\n");
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
goto done;
}
- if (iwl_is_rfkill(priv)) {
+ if (iwl_is_rfkill(priv->shrd)) {
IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
goto done;
}
@@ -3197,101 +2721,198 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
}
}
IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
- iwlagn_wait_tx_queue_empty(priv);
+ iwl_trans_wait_tx_queue_empty(trans(priv));
done:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static void iwlagn_disable_roc(struct iwl_priv *priv)
+void iwlagn_disable_roc(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (!ctx->is_active)
+ if (!priv->hw_roc_setup)
return;
- ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
+ ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_set_rxon_channel(priv, chan, ctx);
- iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
priv->hw_roc_channel = NULL;
+ memset(ctx->staging.node_addr, 0, ETH_ALEN);
+
iwlagn_commit_rxon(priv, ctx);
ctx->is_active = false;
+ priv->hw_roc_setup = false;
}
-static void iwlagn_bg_roc_done(struct work_struct *work)
+static void iwlagn_disable_roc_work(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv,
- hw_roc_work.work);
+ hw_roc_disable_work.work);
- mutex_lock(&priv->mutex);
- ieee80211_remain_on_channel_expired(priv->hw);
+ mutex_lock(&priv->shrd->mutex);
iwlagn_disable_roc(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
-static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
+static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_channel *channel,
enum nl80211_channel_type channel_type,
int duration)
{
struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
int err = 0;
- if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
return -EOPNOTSUPP;
- if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
- BIT(NL80211_IFTYPE_P2P_CLIENT)))
+ if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
return -EOPNOTSUPP;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
- if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
- test_bit(STATUS_SCAN_HW, &priv->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
err = -EBUSY;
goto out;
}
- priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
priv->hw_roc_channel = channel;
priv->hw_roc_chantype = channel_type;
- priv->hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
- iwlagn_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
- queue_delayed_work(priv->workqueue, &priv->hw_roc_work,
- msecs_to_jiffies(duration + 20));
+ priv->hw_roc_duration = duration;
+ priv->hw_roc_start_notified = false;
+ cancel_delayed_work(&priv->hw_roc_disable_work);
+
+ if (!ctx->is_active) {
+ ctx->is_active = true;
+ ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+ memcpy(ctx->staging.node_addr,
+ priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+ ETH_ALEN);
+ memcpy(ctx->staging.bssid_addr,
+ priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+ ETH_ALEN);
+ err = iwlagn_commit_rxon(priv, ctx);
+ if (err)
+ goto out;
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
+ RXON_FILTER_PROMISC_MSK |
+ RXON_FILTER_CTL2HOST_MSK;
+
+ err = iwlagn_commit_rxon(priv, ctx);
+ if (err) {
+ iwlagn_disable_roc(priv);
+ goto out;
+ }
+ priv->hw_roc_setup = true;
+ }
- msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
- ieee80211_ready_on_channel(priv->hw);
+ err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
+ if (err)
+ iwlagn_disable_roc(priv);
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
}
-static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
- if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
return -EOPNOTSUPP;
- cancel_delayed_work_sync(&priv->hw_roc_work);
-
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+ iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
iwlagn_disable_roc(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
}
+static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_associated_ctx(ctx)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
+ if (ret)
+ goto out;
+
+ if (WARN_ON(sta_id != ctx->ap_sta_id)) {
+ ret = -EIO;
+ goto out_remove_sta;
+ }
+
+ memcpy(ctx->bssid, bssid, ETH_ALEN);
+ ctx->preauth_bssid = true;
+
+ ret = iwlagn_commit_rxon(priv, ctx);
+
+ if (ret == 0)
+ goto out;
+
+ out_remove_sta:
+ iwl_remove_station(priv, sta_id, bssid);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_associated_ctx(ctx))
+ goto out;
+
+ iwl_remove_station(priv, ctx->ap_sta_id, bssid);
+ ctx->preauth_bssid = false;
+ /* no need to commit */
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
/*****************************************************************************
*
* driver setup and teardown
@@ -3300,9 +2921,9 @@ static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+ priv->shrd->workqueue = create_singlethread_workqueue(DRV_NAME);
- init_waitqueue_head(&priv->wait_command_queue);
+ init_waitqueue_head(&priv->shrd->wait_command_queue);
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
@@ -3310,7 +2931,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
- INIT_DELAYED_WORK(&priv->hw_roc_work, iwlagn_bg_roc_done);
+ INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
+ iwlagn_disable_roc_work);
iwl_setup_scan_deferred_work(priv);
@@ -3342,6 +2964,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->bt_full_concurrency);
cancel_work_sync(&priv->bt_runtime_config);
+ cancel_delayed_work_sync(&priv->hw_roc_disable_work);
del_timer_sync(&priv->statistics_periodic);
del_timer_sync(&priv->ucode_trace);
@@ -3372,10 +2995,9 @@ static int iwl_init_drv(struct iwl_priv *priv)
{
int ret;
- spin_lock_init(&priv->sta_lock);
- spin_lock_init(&priv->hcmd_lock);
+ spin_lock_init(&priv->shrd->sta_lock);
- mutex_init(&priv->mutex);
+ mutex_init(&priv->shrd->mutex);
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
@@ -3416,7 +3038,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
goto err;
}
- ret = iwlcore_init_geos(priv);
+ ret = iwl_init_geos(priv);
if (ret) {
IWL_ERR(priv, "initializing geos failed: %d\n", ret);
goto err_free_channel_map;
@@ -3434,8 +3056,10 @@ err:
static void iwl_uninit_drv(struct iwl_priv *priv)
{
iwl_calib_free_results(priv);
- iwlcore_free_geos(priv);
+ iwl_free_geos(priv);
iwl_free_channel_map(priv);
+ if (priv->tx_cmd_pool)
+ kmem_cache_destroy(priv->tx_cmd_pool);
kfree(priv->scan_cmd);
kfree(priv->beacon_cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -3443,12 +3067,13 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
#endif
}
-static void iwl_mac_rssi_callback(struct ieee80211_hw *hw,
+static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
enum ieee80211_rssi_event rssi_event)
{
struct iwl_priv *priv = hw->priv;
- mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
if (priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist) {
@@ -3463,89 +3088,88 @@ static void iwl_mac_rssi_callback(struct ieee80211_hw *hw,
"ignoring RSSI callback\n");
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ queue_work(priv->shrd->workqueue, &priv->beacon_update);
+
+ return 0;
}
struct ieee80211_ops iwlagn_hw_ops = {
.tx = iwlagn_mac_tx,
.start = iwlagn_mac_start,
.stop = iwlagn_mac_stop,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = iwlagn_mac_suspend,
.resume = iwlagn_mac_resume,
#endif
- .add_interface = iwl_mac_add_interface,
- .remove_interface = iwl_mac_remove_interface,
- .change_interface = iwl_mac_change_interface,
+ .add_interface = iwlagn_mac_add_interface,
+ .remove_interface = iwlagn_mac_remove_interface,
+ .change_interface = iwlagn_mac_change_interface,
.config = iwlagn_mac_config,
.configure_filter = iwlagn_configure_filter,
.set_key = iwlagn_mac_set_key,
.update_tkip_key = iwlagn_mac_update_tkip_key,
.set_rekey_data = iwlagn_mac_set_rekey_data,
- .conf_tx = iwl_mac_conf_tx,
+ .conf_tx = iwlagn_mac_conf_tx,
.bss_info_changed = iwlagn_bss_info_changed,
.ampdu_action = iwlagn_mac_ampdu_action,
- .hw_scan = iwl_mac_hw_scan,
+ .hw_scan = iwlagn_mac_hw_scan,
.sta_notify = iwlagn_mac_sta_notify,
.sta_add = iwlagn_mac_sta_add,
- .sta_remove = iwl_mac_sta_remove,
+ .sta_remove = iwlagn_mac_sta_remove,
.channel_switch = iwlagn_mac_channel_switch,
.flush = iwlagn_mac_flush,
- .tx_last_beacon = iwl_mac_tx_last_beacon,
- .remain_on_channel = iwl_mac_remain_on_channel,
- .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
- .offchannel_tx = iwl_mac_offchannel_tx,
- .offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
- .rssi_callback = iwl_mac_rssi_callback,
- CFG80211_TESTMODE_CMD(iwl_testmode_cmd)
- CFG80211_TESTMODE_DUMP(iwl_testmode_dump)
+ .tx_last_beacon = iwlagn_mac_tx_last_beacon,
+ .remain_on_channel = iwlagn_mac_remain_on_channel,
+ .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
+ .rssi_callback = iwlagn_mac_rssi_callback,
+ CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
+ .tx_sync = iwlagn_mac_tx_sync,
+ .finish_tx_sync = iwlagn_mac_finish_tx_sync,
+ .set_tim = iwlagn_mac_set_tim,
};
static u32 iwl_hw_detect(struct iwl_priv *priv)
{
- return iwl_read32(priv, CSR_HW_REV);
+ return iwl_read32(bus(priv), CSR_HW_REV);
}
+/* Size of one Rx buffer in host DRAM */
+#define IWL_RX_BUF_SIZE_4K (4 * 1024)
+#define IWL_RX_BUF_SIZE_8K (8 * 1024)
+
static int iwl_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
- priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
if (iwlagn_mod_params.amsdu_size_8K)
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
+ hw_params(priv).rx_page_order =
+ get_order(IWL_RX_BUF_SIZE_8K);
else
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
-
- priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
+ hw_params(priv).rx_page_order =
+ get_order(IWL_RX_BUF_SIZE_4K);
if (iwlagn_mod_params.disable_11n)
priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
+ hw_params(priv).num_ampdu_queues =
+ priv->cfg->base_params->num_of_ampdu_queues;
+ hw_params(priv).shadow_reg_enable =
+ priv->cfg->base_params->shadow_reg_enable;
+ hw_params(priv).sku = priv->cfg->sku;
+ hw_params(priv).wd_timeout = priv->cfg->base_params->wd_timeout;
+
/* Device-specific setup */
return priv->cfg->lib->set_hw_params(priv);
}
-static const u8 iwlagn_bss_ac_to_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
-};
-
-static const u8 iwlagn_bss_ac_to_queue[] = {
- 0, 1, 2, 3,
-};
-
-static const u8 iwlagn_pan_ac_to_fifo[] = {
- IWL_TX_FIFO_VO_IPAN,
- IWL_TX_FIFO_VI_IPAN,
- IWL_TX_FIFO_BE_IPAN,
- IWL_TX_FIFO_BK_IPAN,
-};
-
-static const u8 iwlagn_pan_ac_to_queue[] = {
- 7, 6, 5, 4,
-};
-
/* This function both allocates and initializes hw and priv. */
static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
{
@@ -3568,66 +3192,8 @@ out:
return hw;
}
-static void iwl_init_context(struct iwl_priv *priv)
-{
- int i;
-
- /*
- * The default context is always valid,
- * more may be discovered when firmware
- * is loaded.
- */
- priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
- for (i = 0; i < NUM_IWL_RXON_CTX; i++)
- priv->contexts[i].ctxid = i;
-
- priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
- priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwlagn_bss_ac_to_fifo;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwlagn_bss_ac_to_queue;
- priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
- BIT(NL80211_IFTYPE_ADHOC);
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
- BIT(NL80211_IFTYPE_STATION);
- priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
- priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
- priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
- priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
- priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
- priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
- REPLY_WIPAN_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
- REPLY_WIPAN_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
- priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
- priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
- priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
- priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo = iwlagn_pan_ac_to_fifo;
- priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue = iwlagn_pan_ac_to_queue;
- priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
- priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
- BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
-#ifdef CONFIG_IWL_P2P
- priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
- BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
-#endif
- priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
- priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
- priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
-
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
-}
-
-int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
+int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
+ struct iwl_cfg *cfg)
{
int err = 0;
struct iwl_priv *priv;
@@ -3645,24 +3211,32 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
}
priv = hw->priv;
- priv->bus = bus;
- bus_set_drv_data(priv->bus, priv);
+ priv->shrd = &priv->_shrd;
+ bus->shrd = priv->shrd;
+ priv->shrd->bus = bus;
+ priv->shrd->priv = priv;
+
+ priv->shrd->trans = trans_ops->alloc(priv->shrd);
+ if (priv->shrd->trans == NULL) {
+ err = -ENOMEM;
+ goto out_free_traffic_mem;
+ }
/* At this point both hw and priv are allocated. */
- SET_IEEE80211_DEV(hw, priv->bus->dev);
+ SET_IEEE80211_DEV(hw, bus(priv)->dev);
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
priv->cfg = cfg;
- priv->inta_mask = CSR_INI_SET_MASK;
/* is antenna coupling more than 35dB ? */
priv->bt_ant_couple_ok =
- (iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
- true : false;
+ (iwlagn_mod_params.ant_coupling >
+ IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
+ true : false;
/* enable/disable bt channel inhibition */
- priv->bt_ch_announce = iwlagn_bt_ch_announce;
+ priv->bt_ch_announce = iwlagn_mod_params.bt_ch_announce;
IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
(priv->bt_ch_announce) ? "On" : "Off");
@@ -3672,15 +3246,15 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
/* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
- spin_lock_init(&priv->reg_lock);
- spin_lock_init(&priv->lock);
+ spin_lock_init(&bus(priv)->reg_lock);
+ spin_lock_init(&priv->shrd->lock);
/*
* stop and reset the on-board processor just in case it is in a
* strange state ... like being left stranded by a primary kernel
* and this is now the kdump kernel trying to start up
*/
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+ iwl_write32(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
/***********************
* 3. Read REV register
@@ -3689,11 +3263,11 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
priv->cfg->name, hw_rev);
- err = iwl_trans_register(&priv->trans, priv);
+ err = iwl_trans_request_irq(trans(priv));
if (err)
- goto out_free_traffic_mem;
+ goto out_free_trans;
- if (trans_prepare_card_hw(&priv->trans)) {
+ if (iwl_trans_prepare_card_hw(trans(priv))) {
err = -EIO;
IWL_WARN(priv, "Failed, HW not ready\n");
goto out_free_trans;
@@ -3729,9 +3303,6 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
priv->hw->wiphy->n_addresses++;
}
- /* initialize all valid contexts */
- iwl_init_context(priv);
-
/************************
* 5. Setup HW constants
************************/
@@ -3764,13 +3335,14 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
iwl_enable_rfkill_int(priv);
/* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ if (iwl_read32(bus(priv),
+ CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
+ set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
+ test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
@@ -3784,13 +3356,13 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
return 0;
out_destroy_workqueue:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
+ destroy_workqueue(priv->shrd->workqueue);
+ priv->shrd->workqueue = NULL;
iwl_uninit_drv(priv);
out_free_eeprom:
iwl_eeprom_free(priv);
out_free_trans:
- trans_free(&priv->trans);
+ iwl_trans_free(trans(priv));
out_free_traffic_mem:
iwl_free_traffic_mem(priv);
ieee80211_free_hw(priv->hw);
@@ -3800,21 +3372,17 @@ out:
void __devexit iwl_remove(struct iwl_priv * priv)
{
- unsigned long flags;
-
wait_for_completion(&priv->firmware_loading_complete);
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
iwl_dbgfs_unregister(priv);
- sysfs_remove_group(&priv->bus->dev->kobj,
- &iwl_attribute_group);
- /* ieee80211_unregister_hw call wil cause iwl_mac_stop to
+ /* ieee80211_unregister_hw call will cause iwlagn_mac_stop
* to be called and iwl_down since we are removing the device
* we need to set STATUS_EXIT_PENDING bit.
*/
- set_bit(STATUS_EXIT_PENDING, &priv->status);
+ set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
iwl_testmode_cleanup(priv);
iwl_leds_exit(priv);
@@ -3824,40 +3392,26 @@ void __devexit iwl_remove(struct iwl_priv * priv)
priv->mac80211_registered = 0;
}
- /* Reset to low power before unloading driver. */
- iwl_apm_stop(priv);
-
iwl_tt_exit(priv);
- /* make sure we flush any pending irq or
- * tasklet for the driver
- */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- trans_sync_irq(&priv->trans);
+ /* This will stop the queues and move the device to a low power state */
+ iwl_trans_stop_device(trans(priv));
iwl_dealloc_ucode(priv);
- trans_rx_free(&priv->trans);
- trans_tx_free(&priv->trans);
-
iwl_eeprom_free(priv);
/*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
+ flush_workqueue(priv->shrd->workqueue);
- /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
- * priv->workqueue... so we can't take down the workqueue
+ /* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes
+ * priv->shrd->workqueue... so we can't take down the workqueue
* until now... */
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
+ destroy_workqueue(priv->shrd->workqueue);
+ priv->shrd->workqueue = NULL;
iwl_free_traffic_mem(priv);
- trans_free(&priv->trans);
-
- bus_set_drv_data(priv->bus, NULL);
+ iwl_trans_free(trans(priv));
iwl_uninit_drv(priv);
@@ -3906,7 +3460,8 @@ module_exit(iwl_exit);
module_init(iwl_init);
#ifdef CONFIG_IWLWIFI_DEBUG
-module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
+module_param_named(debug, iwlagn_mod_params.debug_level, uint,
+ S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
@@ -3922,18 +3477,21 @@ MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
-module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
- S_IRUGO);
+module_param_named(ucode_alternative,
+ iwlagn_mod_params.wanted_ucode_alternative,
+ int, S_IRUGO);
MODULE_PARM_DESC(ucode_alternative,
"specify ucode alternative to use from ucode file");
-module_param_named(antenna_coupling, iwlagn_ant_coupling, int, S_IRUGO);
+module_param_named(antenna_coupling, iwlagn_mod_params.ant_coupling,
+ int, S_IRUGO);
MODULE_PARM_DESC(antenna_coupling,
"specify antenna coupling in dB (defualt: 0 dB)");
-module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
+module_param_named(bt_ch_inhibition, iwlagn_mod_params.bt_ch_announce,
+ bool, S_IRUGO);
MODULE_PARM_DESC(bt_ch_inhibition,
- "Disable BT channel inhibition (default: enable)");
+ "Enable BT channel inhibition (default: enable)");
module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
@@ -3979,6 +3537,11 @@ module_param_named(power_level, iwlagn_mod_params.power_level,
MODULE_PARM_DESC(power_level,
"default power save level (range from 1 - 5, default: 1)");
+module_param_named(auto_agg, iwlagn_mod_params.auto_agg,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(auto_agg,
+ "enable agg w/o check traffic load (default: enable)");
+
/*
* For now, keep using power level 1 instead of automatically
* adjusting ...
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index d941c4c98e4..5b936ec1a54 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,54 +65,9 @@
#include "iwl-dev.h"
-/* configuration for the _agn devices */
-extern struct iwl_cfg iwl5300_agn_cfg;
-extern struct iwl_cfg iwl5100_agn_cfg;
-extern struct iwl_cfg iwl5350_agn_cfg;
-extern struct iwl_cfg iwl5100_bgn_cfg;
-extern struct iwl_cfg iwl5100_abg_cfg;
-extern struct iwl_cfg iwl5150_agn_cfg;
-extern struct iwl_cfg iwl5150_abg_cfg;
-extern struct iwl_cfg iwl6005_2agn_cfg;
-extern struct iwl_cfg iwl6005_2abg_cfg;
-extern struct iwl_cfg iwl6005_2bg_cfg;
-extern struct iwl_cfg iwl1030_bgn_cfg;
-extern struct iwl_cfg iwl1030_bg_cfg;
-extern struct iwl_cfg iwl6030_2agn_cfg;
-extern struct iwl_cfg iwl6030_2abg_cfg;
-extern struct iwl_cfg iwl6030_2bgn_cfg;
-extern struct iwl_cfg iwl6030_2bg_cfg;
-extern struct iwl_cfg iwl6000i_2agn_cfg;
-extern struct iwl_cfg iwl6000i_2abg_cfg;
-extern struct iwl_cfg iwl6000i_2bg_cfg;
-extern struct iwl_cfg iwl6000_3agn_cfg;
-extern struct iwl_cfg iwl6050_2agn_cfg;
-extern struct iwl_cfg iwl6050_2abg_cfg;
-extern struct iwl_cfg iwl6150_bgn_cfg;
-extern struct iwl_cfg iwl6150_bg_cfg;
-extern struct iwl_cfg iwl1000_bgn_cfg;
-extern struct iwl_cfg iwl1000_bg_cfg;
-extern struct iwl_cfg iwl100_bgn_cfg;
-extern struct iwl_cfg iwl100_bg_cfg;
-extern struct iwl_cfg iwl130_bgn_cfg;
-extern struct iwl_cfg iwl130_bg_cfg;
-extern struct iwl_cfg iwl2000_2bgn_cfg;
-extern struct iwl_cfg iwl2000_2bg_cfg;
-extern struct iwl_cfg iwl2030_2bgn_cfg;
-extern struct iwl_cfg iwl2030_2bg_cfg;
-extern struct iwl_cfg iwl6035_2agn_cfg;
-extern struct iwl_cfg iwl6035_2abg_cfg;
-extern struct iwl_cfg iwl6035_2bg_cfg;
-extern struct iwl_cfg iwl105_bg_cfg;
-extern struct iwl_cfg iwl105_bgn_cfg;
-extern struct iwl_cfg iwl135_bg_cfg;
-extern struct iwl_cfg iwl135_bgn_cfg;
-
-extern struct iwl_mod_params iwlagn_mod_params;
-
extern struct ieee80211_ops iwlagn_hw_ops;
-int iwl_reset_ict(struct iwl_priv *priv);
+int iwl_reset_ict(struct iwl_trans *trans);
static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
{
@@ -122,10 +77,6 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
hdr->data_valid = 1;
}
-/* tx queue */
-void iwl_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed);
-
/* RXON */
int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -137,8 +88,9 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
u32 changes);
/* uCode */
-void iwlagn_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+int iwlagn_rx_calib_result(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
void iwlagn_send_prio_tbl(struct iwl_priv *priv);
int iwlagn_run_init_ucode(struct iwl_priv *priv);
@@ -147,13 +99,9 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwlagn_ucode_type ucode_type);
/* lib */
-void iwl_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status);
-int iwlagn_hw_valid_rtc_data_addr(u32 addr);
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
-int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv);
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
@@ -161,25 +109,19 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
/* rx */
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
void iwl_setup_rx_handlers(struct iwl_priv *priv);
-void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* tx */
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int index);
-void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info);
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
-int iwlagn_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id);
-void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
+int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
static inline u32 iwl_tx_status_to_mac80211(u32 status)
{
@@ -207,17 +149,14 @@ static inline bool iwl_is_tx_success(u32 status)
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
/* scan */
-int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwlagn_post_scan(struct iwl_priv *priv);
-
-/* station mgmt */
-int iwlagn_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
+void iwlagn_disable_roc(struct iwl_priv *priv);
/* bt coex */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
-void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
@@ -232,7 +171,120 @@ static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
static inline const char *iwl_get_agg_tx_fail_reason(u16 status) { return ""; }
#endif
+
/* station management */
+int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
+
+void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+void iwl_clear_ucode_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
+int iwl_get_free_ucode_key_offset(struct iwl_priv *priv);
+int iwl_send_add_sta(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *sta, u8 flags);
+int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r);
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr);
+int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
+
+void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ u8 sta_id, struct iwl_link_quality_cmd *link_cmd);
+int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init);
+void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+
+/**
+ * iwl_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() so that, when we get there from a
+ * hardware restart, mac80211 will be able to reconfigure the
+ * stations; in the normal down flow the stations will already have
+ * been cleared.
+ */
+static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ struct iwl_rxon_context *ctx;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ memset(priv->stations, 0, sizeof(priv->stations));
+ priv->num_stations = 0;
+
+ priv->ucode_key_table = 0;
+
+ for_each_context(priv, ctx) {
+ /*
+ * Remove all key information that is not stored as part
+ * of station information since mac80211 may not have had
+ * a chance to remove all the keys. When device is
+ * reconfigured by mac80211 after an error all keys will
+ * be reconfigured.
+ */
+ memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+ ctx->key_mapping_keys = 0;
+ }
+
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+}
+
+static inline int iwl_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IWL_INVALID_STATION;
+
+ return ((struct iwl_station_priv *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @priv: iwl priv
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
+ struct iwl_rxon_context *context,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return context->bcast_sta_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IWL_INVALID_STATION);
+
+ return sta_id;
+}
+
int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
@@ -286,7 +338,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
}
/* eeprom */
-void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv);
+void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
/* notification wait support */
@@ -309,20 +361,22 @@ extern int iwlagn_init_alive_start(struct iwl_priv *priv);
extern int iwl_alive_start(struct iwl_priv *priv);
/* svtool */
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
-extern int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len);
-extern int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct netlink_callback *cb,
- void *data, int len);
+extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
+ int len);
+extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len);
extern void iwl_testmode_init(struct iwl_priv *priv);
extern void iwl_testmode_cleanup(struct iwl_priv *priv);
#else
static inline
-int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
+int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
return -ENOSYS;
}
static inline
-int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len)
{
diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h
index f3ee1c0c004..08b97594e30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-bus.h
+++ b/drivers/net/wireless/iwlwifi/iwl-bus.h
@@ -60,16 +60,68 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-#ifndef __iwl_pci_h__
-#define __iwl_pci_h__
+#ifndef __iwl_bus_h__
+#define __iwl_bus_h__
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+/**
+ * DOC: Bus layer - role and goal
+ *
+ * iwl-bus.h defines the API to the bus layer of the iwlwifi driver.
+ * The bus layer is responsible for doing very basic bus operations that are
+ * listed in the iwl_bus_ops structure.
+ * The bus layer registers with the bus driver, advertises the supported HW
+ * and gets notified about enumeration, suspend and resume.
+ * For the moment, the bus layer is not a Linux kernel module in itself, so
+ * the module_init function of the driver must call the bus specific
+ * registration functions. These functions are listed at the end of this file.
+ * For the moment, there is only one implementation of this interface: PCI-e,
+ * implemented in iwl-pci.c.
+ */
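As a rough illustration of the registration flow just described, and assuming only the iwl_pci_register_driver()/iwl_pci_unregister_driver() helpers declared at the end of this header, the driver's module init/exit hooks reduce to something like the sketch below (names such as iwl_init_sketch are illustrative, not the driver's actual symbols):

#include <linux/module.h>

/* Sketch only: the real module_init also registers rate scaling and does
 * fuller setup; the bus-layer part is just the call into the bus specific
 * registration helper declared at the bottom of this header. */
static int __init iwl_init_sketch(void)
{
	int ret;

	/* Registering with the PCI core starts bus enumeration, which
	 * eventually reaches the bus specific probe function. */
	ret = iwl_pci_register_driver();
	if (ret)
		pr_err("iwlwifi: PCI bus registration failed: %d\n", ret);
	return ret;
}

static void __exit iwl_exit_sketch(void)
{
	iwl_pci_unregister_driver();
}

module_init(iwl_init_sketch);
module_exit(iwl_exit_sketch);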
+
+/**
+ * DOC: encapsulation and type safety
+ *
+ * The iwl_bus describes the data that is shared amongst all the bus layer
+ * implementations. This data is visible to other layers. Data in the bus
+ * specific area is not visible outside the bus specific implementation.
+ * iwl_bus holds a pointer to iwl_shared, which in turn holds pointers to all
+ * the other layers of the driver (iwl_priv, iwl_trans). This is the way to go
+ * when the transport layer needs to call a function of another layer.
+ *
+ * In order to achieve encapsulation, iwl_priv cannot be dereferenced from the
+ * bus layer. Type safety is still kept since functions that take iwl_priv
+ * receive a typed pointer (as opposed to void *).
+ */
+
+/**
+ * DOC: probe flow
+ *
+ * The module_init calls the bus specific registration function. The
+ * registration to the bus layer will trigger an enumeration of the bus which
+ * will call the bus specific probe function.
+ * The first thing this function must do is to allocate the memory needed by
+ * iwl_bus + the bus_specific data.
+ * Once the bus specific probe function has configured the hardware, it
+ * chooses the appropriate transport layer and calls iwl_probe, which runs
+ * the bus independent probe flow.
+ *
+ * Note: The bus specific code must set the following data in iwl_bus before it
+ * calls iwl_probe:
+ * * bus->dev
+ * * bus->irq
+ * * bus->ops
+ */
+
+struct iwl_shared;
struct iwl_bus;
/**
* struct iwl_bus_ops - bus specific operations
 * @get_pm_support: must return true if the bus can go to sleep
- * @apm_config: will be called during the config of the APM configuration
- * @set_drv_data: set the drv_data pointer to the bus layer
+ * @apm_config: will be called during the config of the APM
* @get_hw_id: prints the hw_id in the provided buffer
* @write8: write a byte to register at offset ofs
* @write32: write a dword to register at offset ofs
@@ -78,20 +130,32 @@ struct iwl_bus;
struct iwl_bus_ops {
bool (*get_pm_support)(struct iwl_bus *bus);
void (*apm_config)(struct iwl_bus *bus);
- void (*set_drv_data)(struct iwl_bus *bus, void *drv_data);
void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
u32 (*read32)(struct iwl_bus *bus, u32 ofs);
};
+/**
+ * struct iwl_bus - bus common data
+ *
+ * This data is common to all bus layer implementations.
+ *
+ * @dev - pointer to struct device * that represents the device
+ * @ops - pointer to iwl_bus_ops
+ * @shrd - pointer to iwl_shared which holds shared data from the upper layer
+ * NB: for the time being this needs to be set by the upper layer since
+ * it allocates the shared data
+ * @irq - the irq number for the device
+ * @reg_lock - protect hw register access
+ */
struct iwl_bus {
- /* Common data to all buses */
- void *drv_data; /* driver's context */
struct device *dev;
- struct iwl_bus_ops *ops;
+ const struct iwl_bus_ops *ops;
+ struct iwl_shared *shrd;
unsigned int irq;
+ spinlock_t reg_lock;
/* pointer to bus specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -108,11 +172,6 @@ static inline void bus_apm_config(struct iwl_bus *bus)
bus->ops->apm_config(bus);
}
-static inline void bus_set_drv_data(struct iwl_bus *bus, void *drv_data)
-{
- bus->ops->set_drv_data(bus, drv_data);
-}
-
static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
{
bus->ops->get_hw_id(bus, buf, buf_len);
@@ -133,7 +192,10 @@ static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
return bus->ops->read32(bus, ofs);
}
+/*****************************************************
+* Bus layer registration functions
+******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
-#endif
+#endif /* __iwl_bus_h__ */
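As a reader's aid for the new bus abstraction: the comments above describe a bus implementation filling in iwl_bus_ops, setting dev/irq/ops and then handing off to the common probe. The following is a minimal, self-contained user-space sketch of that pattern; every type and function in it is a mock standing in for the kernel structures, not code from this patch.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    /* Mock stand-ins for the kernel structures (illustrative only). */
    struct iwl_bus;

    struct iwl_bus_ops {
        bool (*get_pm_support)(struct iwl_bus *bus);
        void (*write32)(struct iwl_bus *bus, unsigned int ofs, unsigned int val);
    };

    struct iwl_bus {
        void *dev;                     /* would be a struct device * */
        const struct iwl_bus_ops *ops;
        unsigned int irq;
        char bus_specific[];           /* bus-private area follows the common part */
    };

    /* A fake "PCI" implementation of the ops. */
    static bool pci_get_pm_support(struct iwl_bus *bus) { (void)bus; return true; }
    static void pci_write32(struct iwl_bus *bus, unsigned int ofs, unsigned int val)
    {
        (void)bus;
        printf("write32 ofs=0x%x val=0x%x\n", ofs, val);
    }

    static const struct iwl_bus_ops pci_bus_ops = {
        .get_pm_support = pci_get_pm_support,
        .write32        = pci_write32,
    };

    /* Stand-in for the bus-independent iwl_probe(). */
    static int fake_iwl_probe(struct iwl_bus *bus)
    {
        printf("probe: irq=%u pm=%d\n", bus->irq, bus->ops->get_pm_support(bus));
        bus->ops->write32(bus, 0x20, 0x1);
        return 0;
    }

    int main(void)
    {
        /* Bus-specific probe: allocate the common part plus a private area,
         * fill the mandatory fields (dev, irq, ops), then call the common probe. */
        struct iwl_bus *bus = calloc(1, sizeof(*bus) + 64);
        if (!bus)
            return 1;
        bus->dev = NULL;
        bus->irq = 42;
        bus->ops = &pci_bus_ops;
        return fake_iwl_probe(bus);
    }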
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
index f9630a3c79f..2a2dc4597ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
@@ -60,29 +60,58 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-/*
- * Please use this file (iwl-5000-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- */
-
-#ifndef __iwl_5000_hw_h__
-#define __iwl_5000_hw_h__
-
-/* 5150 only */
-#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
+#ifndef __iwl_pci_h__
+#define __iwl_pci_h__
-static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
-{
- u16 temperature, voltage;
- __le16 *temp_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE);
- temperature = le16_to_cpu(temp_calib[0]);
- voltage = le16_to_cpu(temp_calib[1]);
-
- /* offset = temp - volt / coeff */
- return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
-}
+/*
+ * This file declares the config structures for all devices.
+ */
-#endif /* __iwl_5000_hw_h__ */
+extern struct iwl_cfg iwl5300_agn_cfg;
+extern struct iwl_cfg iwl5100_agn_cfg;
+extern struct iwl_cfg iwl5350_agn_cfg;
+extern struct iwl_cfg iwl5100_bgn_cfg;
+extern struct iwl_cfg iwl5100_abg_cfg;
+extern struct iwl_cfg iwl5150_agn_cfg;
+extern struct iwl_cfg iwl5150_abg_cfg;
+extern struct iwl_cfg iwl6005_2agn_cfg;
+extern struct iwl_cfg iwl6005_2abg_cfg;
+extern struct iwl_cfg iwl6005_2bg_cfg;
+extern struct iwl_cfg iwl6005_2agn_sff_cfg;
+extern struct iwl_cfg iwl6005_2agn_d_cfg;
+extern struct iwl_cfg iwl1030_bgn_cfg;
+extern struct iwl_cfg iwl1030_bg_cfg;
+extern struct iwl_cfg iwl6030_2agn_cfg;
+extern struct iwl_cfg iwl6030_2abg_cfg;
+extern struct iwl_cfg iwl6030_2bgn_cfg;
+extern struct iwl_cfg iwl6030_2bg_cfg;
+extern struct iwl_cfg iwl6000i_2agn_cfg;
+extern struct iwl_cfg iwl6000i_2abg_cfg;
+extern struct iwl_cfg iwl6000i_2bg_cfg;
+extern struct iwl_cfg iwl6000_3agn_cfg;
+extern struct iwl_cfg iwl6050_2agn_cfg;
+extern struct iwl_cfg iwl6050_2abg_cfg;
+extern struct iwl_cfg iwl6150_bgn_cfg;
+extern struct iwl_cfg iwl6150_bg_cfg;
+extern struct iwl_cfg iwl1000_bgn_cfg;
+extern struct iwl_cfg iwl1000_bg_cfg;
+extern struct iwl_cfg iwl100_bgn_cfg;
+extern struct iwl_cfg iwl100_bg_cfg;
+extern struct iwl_cfg iwl130_bgn_cfg;
+extern struct iwl_cfg iwl130_bg_cfg;
+extern struct iwl_cfg iwl2000_2bgn_cfg;
+extern struct iwl_cfg iwl2000_2bg_cfg;
+extern struct iwl_cfg iwl2000_2bgn_d_cfg;
+extern struct iwl_cfg iwl2030_2bgn_cfg;
+extern struct iwl_cfg iwl2030_2bg_cfg;
+extern struct iwl_cfg iwl6035_2agn_cfg;
+extern struct iwl_cfg iwl6035_2abg_cfg;
+extern struct iwl_cfg iwl6035_2bg_cfg;
+extern struct iwl_cfg iwl105_bg_cfg;
+extern struct iwl_cfg iwl105_bgn_cfg;
+extern struct iwl_cfg iwl105_bgn_d_cfg;
+extern struct iwl_cfg iwl135_bg_cfg;
+extern struct iwl_cfg iwl135_bgn_cfg;
+#endif /* __iwl_pci_h__ */
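These extern declarations exist so that the bus glue (iwl-pci.c) can map device IDs to a per-device configuration. Below is a hedged, self-contained sketch of that lookup pattern; the struct layout, table entries and IDs are invented for illustration and are not taken from this patch.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-in; the real struct iwl_cfg is far richer. */
    struct iwl_cfg {
        const char *name;
        unsigned int ucode_api_max;
    };

    static const struct iwl_cfg iwl5100_agn_cfg = { "5100 AGN", 5 };
    static const struct iwl_cfg iwl6005_2agn_cfg = { "6205 AGN", 6 };

    /* Device/subdevice -> config table, in the spirit of a PCI ID table
     * entry carrying a pointer to its iwl_cfg (IDs here are invented). */
    struct dev_entry {
        uint16_t device;
        uint16_t subdevice;
        const struct iwl_cfg *cfg;
    };

    static const struct dev_entry dev_table[] = {
        { 0x1111, 0x1201, &iwl5100_agn_cfg },
        { 0x2222, 0x1301, &iwl6005_2agn_cfg },
    };

    static const struct iwl_cfg *lookup_cfg(uint16_t dev, uint16_t subdev)
    {
        for (size_t i = 0; i < sizeof(dev_table) / sizeof(dev_table[0]); i++)
            if (dev_table[i].device == dev && dev_table[i].subdevice == subdev)
                return dev_table[i].cfg;
        return NULL;
    }

    int main(void)
    {
        const struct iwl_cfg *cfg = lookup_cfg(0x1111, 0x1201);
        if (cfg)
            printf("matched: %s (uCode API max %u)\n", cfg->name, cfg->ucode_api_max);
        return 0;
    }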
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e9e9d1d1778..69d5f85d11e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -69,6 +69,9 @@
#ifndef __iwl_commands_h__
#define __iwl_commands_h__
+#include <linux/etherdevice.h>
+#include <linux/ieee80211.h>
+
struct iwl_priv;
/* uCode version contains 4 values: Major/Minor/API/Serial */
@@ -86,6 +89,7 @@ struct iwl_priv;
enum {
REPLY_ALIVE = 0x1,
REPLY_ERROR = 0x2,
+ REPLY_ECHO = 0x3, /* test command */
/* RXON and QOS commands */
REPLY_RXON = 0x10,
@@ -670,7 +674,6 @@ struct iwl_rxon_assoc_cmd {
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
-#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
/*
* REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
@@ -806,6 +809,7 @@ struct iwl_qosparam_cmd {
#define IWLAGN_STATION_COUNT 16
#define IWL_INVALID_STATION 255
+#define IWL_MAX_TID_COUNT 9
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -3067,17 +3071,29 @@ struct iwl_missed_beacon_notif {
/* number of additional entries for enhanced tbl */
#define ENHANCE_HD_TABLE_ENTRIES (ENHANCE_HD_TABLE_SIZE - HD_TABLE_SIZE)
-#define HD_INA_NON_SQUARE_DET_OFDM_DATA cpu_to_le16(0)
-#define HD_INA_NON_SQUARE_DET_CCK_DATA cpu_to_le16(0)
-#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA cpu_to_le16(0)
-#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA cpu_to_le16(668)
-#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA cpu_to_le16(4)
-#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA cpu_to_le16(486)
-#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA cpu_to_le16(37)
-#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA cpu_to_le16(853)
-#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA cpu_to_le16(4)
-#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA cpu_to_le16(476)
-#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA cpu_to_le16(99)
+#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V1 cpu_to_le16(0)
+#define HD_INA_NON_SQUARE_DET_CCK_DATA_V1 cpu_to_le16(0)
+#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1 cpu_to_le16(0)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1 cpu_to_le16(668)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1 cpu_to_le16(4)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1 cpu_to_le16(486)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1 cpu_to_le16(37)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1 cpu_to_le16(853)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1 cpu_to_le16(4)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1 cpu_to_le16(476)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1 cpu_to_le16(99)
+
+#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V2 cpu_to_le16(1)
+#define HD_INA_NON_SQUARE_DET_CCK_DATA_V2 cpu_to_le16(1)
+#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2 cpu_to_le16(1)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2 cpu_to_le16(600)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2 cpu_to_le16(40)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2 cpu_to_le16(486)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2 cpu_to_le16(45)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2 cpu_to_le16(853)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2 cpu_to_le16(60)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2 cpu_to_le16(476)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2 cpu_to_le16(99)
/* Control field in struct iwl_sensitivity_cmd */
@@ -3198,12 +3214,17 @@ enum iwl_ucode_calib_cfg {
IWL_CALIB_CFG_LO_IDX | \
IWL_CALIB_CFG_TX_IQ_IDX | \
IWL_CALIB_CFG_RX_IQ_IDX | \
- IWL_CALIB_CFG_NOISE_IDX | \
- IWL_CALIB_CFG_CRYSTAL_IDX | \
+ IWL_CALIB_CFG_CRYSTAL_IDX)
+
+#define IWL_CALIB_RT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \
+ IWL_CALIB_CFG_DC_IDX | \
+ IWL_CALIB_CFG_LO_IDX | \
+ IWL_CALIB_CFG_TX_IQ_IDX | \
+ IWL_CALIB_CFG_RX_IQ_IDX | \
IWL_CALIB_CFG_TEMPERATURE_IDX | \
IWL_CALIB_CFG_PAPD_IDX | \
- IWL_CALIB_CFG_SENSITIVITY_IDX | \
- IWL_CALIB_CFG_TX_PWR_IDX)
+ IWL_CALIB_CFG_TX_PWR_IDX | \
+ IWL_CALIB_CFG_CRYSTAL_IDX)
#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK cpu_to_le32(BIT(0))
@@ -3253,6 +3274,14 @@ struct iwl_calib_temperature_offset_cmd {
__le16 reserved;
} __packed;
+struct iwl_calib_temperature_offset_v2_cmd {
+ struct iwl_calib_hdr hdr;
+ __le16 radio_sensor_offset_high;
+ __le16 radio_sensor_offset_low;
+ __le16 burntVoltageRef;
+ __le16 reserved;
+} __packed;
+
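The new v2 layout above splits the radio sensor offset into high/low values and adds a burnt voltage reference. Here is a self-contained sketch of how a driver might pick between the v1 and v2 command layouts based on a per-device flag; the selection flag mirrors the temp_offset_v2 config field introduced later in this patch, and all field values are invented example numbers.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Simplified stand-ins for the two calibration command layouts. */
    struct calib_hdr { uint8_t op_code, first_group, groups_num, data_valid; };

    struct temp_offset_v1_cmd {
        struct calib_hdr hdr;
        uint16_t radio_sensor_offset;   /* little-endian on the wire */
        uint16_t reserved;
    };

    struct temp_offset_v2_cmd {
        struct calib_hdr hdr;
        uint16_t radio_sensor_offset_high;
        uint16_t radio_sensor_offset_low;
        uint16_t burntVoltageRef;
        uint16_t reserved;
    };

    /* Hypothetical helper: build whichever payload the device needs.
     * Offsets are made-up example values, not real EEPROM data. */
    static size_t build_temp_offset_cmd(int temp_offset_v2, void *buf)
    {
        if (temp_offset_v2) {
            struct temp_offset_v2_cmd cmd = {
                .radio_sensor_offset_high = 0x01c8,
                .radio_sensor_offset_low  = 0x0190,
                .burntVoltageRef          = 0x0164,
            };
            memcpy(buf, &cmd, sizeof(cmd));
            return sizeof(cmd);
        } else {
            struct temp_offset_v1_cmd cmd = { .radio_sensor_offset = 0x01c8 };
            memcpy(buf, &cmd, sizeof(cmd));
            return sizeof(cmd);
        }
    }

    int main(void)
    {
        unsigned char buf[32];
        printf("v1 payload: %zu bytes\n", build_temp_offset_cmd(0, buf));
        printf("v2 payload: %zu bytes\n", build_temp_offset_cmd(1, buf));
        return 0;
    }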
/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
struct iwl_calib_chain_noise_reset_cmd {
struct iwl_calib_hdr hdr;
@@ -3897,6 +3926,7 @@ struct iwlagn_wowlan_kek_kck_material_cmd {
* Union of all expected notifications/responses:
*
*****************************************************************************/
+#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
struct iwl_rx_packet {
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index cf376f62b2f..b247a56d513 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -34,29 +34,26 @@
#include <net/mac80211.h>
#include "iwl-eeprom.h"
-#include "iwl-dev.h" /* FIXME: remove */
#include "iwl-debug.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-power.h"
-#include "iwl-sta.h"
-#include "iwl-helpers.h"
+#include "iwl-agn.h"
+#include "iwl-shared.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
-u32 iwl_debug_level;
-
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
+static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
struct ieee80211_sta_ht_cap *ht_info,
enum ieee80211_band band)
{
u16 max_bit_rate = 0;
- u8 rx_chains_num = priv->hw_params.rx_chains_num;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 rx_chains_num = hw_params(priv).rx_chains_num;
+ u8 tx_chains_num = hw_params(priv).tx_chains_num;
ht_info->cap = 0;
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
@@ -68,7 +65,7 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
max_bit_rate = MAX_BIT_RATE_20_MHZ;
- if (priv->hw_params.ht40_channel & BIT(band)) {
+ if (hw_params(priv).ht40_channel & BIT(band)) {
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
ht_info->mcs.rx_mask[4] = 0x01;
@@ -106,9 +103,9 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
}
/**
- * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
+ * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
*/
-int iwlcore_init_geos(struct iwl_priv *priv)
+int iwl_init_geos(struct iwl_priv *priv)
{
struct iwl_channel_info *ch;
struct ieee80211_supported_band *sband;
@@ -121,16 +118,16 @@ int iwlcore_init_geos(struct iwl_priv *priv)
if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+ set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
return 0;
}
- channels = kzalloc(sizeof(struct ieee80211_channel) *
- priv->channel_count, GFP_KERNEL);
+ channels = kcalloc(priv->channel_count,
+ sizeof(struct ieee80211_channel), GFP_KERNEL);
if (!channels)
return -ENOMEM;
- rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
+ rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
GFP_KERNEL);
if (!rates) {
kfree(channels);
@@ -145,7 +142,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
- iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
+ iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_5GHZ);
sband = &priv->bands[IEEE80211_BAND_2GHZ];
@@ -155,7 +152,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
- iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
+ iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_2GHZ);
priv->ieee_channels = channels;
@@ -211,7 +208,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
char buf[32];
- bus_get_hw_id(priv->bus, buf, sizeof(buf));
+ bus_get_hw_id(bus(priv), buf, sizeof(buf));
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
"Please send your %s to maintainer.\n", buf);
priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
@@ -221,19 +218,19 @@ int iwlcore_init_geos(struct iwl_priv *priv)
priv->bands[IEEE80211_BAND_2GHZ].n_channels,
priv->bands[IEEE80211_BAND_5GHZ].n_channels);
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+ set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
return 0;
}
/*
- * iwlcore_free_geos - undo allocations in iwlcore_init_geos
+ * iwl_free_geos - undo allocations in iwl_init_geos
*/
-void iwlcore_free_geos(struct iwl_priv *priv)
+void iwl_free_geos(struct iwl_priv *priv)
{
kfree(priv->ieee_channels);
kfree(priv->ieee_rates);
- clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
+ clear_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
}
static bool iwl_is_channel_extension(struct iwl_priv *priv,
@@ -323,9 +320,9 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
u16 beacon_int;
struct ieee80211_vif *vif = ctx->vif;
- conf = ieee80211_get_hw_conf(priv->hw);
+ conf = &priv->hw->conf;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
@@ -359,7 +356,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
} else {
beacon_int = iwl_adjust_beacon_interval(beacon_int,
- priv->hw_params.max_beacon_itrvl * TIME_UNIT);
+ IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
}
@@ -378,7 +375,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
le32_to_cpu(ctx->timing.beacon_init_val),
le16_to_cpu(ctx->timing.atim_window));
- return trans_send_cmd_pdu(&priv->trans, ctx->rxon_timing_cmd,
+ return iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_timing_cmd,
CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
}
@@ -804,21 +801,23 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
{
/*
* MULTI-FIXME
- * See iwl_mac_channel_switch.
+ * See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+ &priv->shrd->status))
ieee80211_chswitch_done(ctx->vif, is_success);
}
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
+ enum iwl_rxon_context_id ctxid)
{
+ struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
struct iwl_rxon_cmd *rxon = &ctx->staging;
IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
@@ -856,18 +855,18 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
unsigned long reload_jiffies;
/* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
+ set_bit(STATUS_FW_ERROR, &priv->shrd->status);
/* Cancel currently queued command. */
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
iwlagn_abort_notification_waits(priv);
/* Keep the restart process from trying to send host
* commands by clearing the ready bit */
- clear_bit(STATUS_READY, &priv->status);
+ clear_bit(STATUS_READY, &priv->shrd->status);
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->shrd->wait_command_queue);
if (!ondemand) {
/*
@@ -890,63 +889,26 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
priv->reload_count = 0;
}
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
if (iwlagn_mod_params.restart_fw) {
- IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
+ IWL_DEBUG_FW_ERRORS(priv,
"Restarting adapter due to uCode error.\n");
- queue_work(priv->workqueue, &priv->restart);
+ queue_work(priv->shrd->workqueue, &priv->restart);
} else
- IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
+ IWL_DEBUG_FW_ERRORS(priv,
"Detected FW error, but not restarting\n");
}
}
-/**
- * iwl_irq_handle_error - called for HW or SW error interrupt from card
- */
-void iwl_irq_handle_error(struct iwl_priv *priv)
-{
- /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
- if (priv->cfg->internal_wimax_coex &&
- (!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) &
- APMS_CLK_VAL_MRB_FUNC_MODE) ||
- (iwl_read_prph(priv, APMG_PS_CTRL_REG) &
- APMG_PS_CTRL_VAL_RESET_REQ))) {
- /*
- * Keep the restart process from trying to send host
- * commands by clearing the ready bit.
- */
- clear_bit(STATUS_READY, &priv->status);
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- wake_up_interruptible(&priv->wait_command_queue);
- IWL_ERR(priv, "RF is used by WiMAX\n");
- return;
- }
-
- IWL_ERR(priv, "Loaded firmware version: %s\n",
- priv->hw->wiphy->fw_version);
-
- iwl_dump_nic_error_log(priv);
- iwl_dump_csr(priv);
- iwl_dump_fh(priv, NULL, false);
- iwl_dump_nic_event_log(priv, false, NULL, false);
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
- iwl_print_rx_config_cmd(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-#endif
-
- iwlagn_fw_error(priv, false);
-}
-
static int iwl_apm_stop_master(struct iwl_priv *priv)
{
int ret = 0;
/* stop device's busmaster DMA activity */
- iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+ iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
- ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ ret = iwl_poll_bit(bus(priv), CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret)
IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
@@ -960,13 +922,13 @@ void iwl_apm_stop(struct iwl_priv *priv)
{
IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
- clear_bit(STATUS_DEVICE_ENABLED, &priv->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
/* Stop device's DMA activity */
iwl_apm_stop_master(priv);
/* Reset the entire device */
- iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
@@ -974,7 +936,7 @@ void iwl_apm_stop(struct iwl_priv *priv)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ iwl_clear_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
@@ -994,45 +956,45 @@ int iwl_apm_init(struct iwl_priv *priv)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+ iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
* don't wait for ICH L0s (ICH bug W/A)
*/
- iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+ iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
/* Set FH wait threshold to maximum (HW error during stress W/A) */
- iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+ iwl_set_bit(bus(priv), CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
/*
* Enable HAP INTA (interrupt from management bus) to
* wake device's PCI Express link L1a -> L0s
*/
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
- bus_apm_config(priv->bus);
+ bus_apm_config(bus(priv));
/* Configure analog phase-lock-loop before activating to D0A */
if (priv->cfg->base_params->pll_cfg_val)
- iwl_set_bit(priv, CSR_ANA_PLL_CFG,
+ iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
priv->cfg->base_params->pll_cfg_val);
/*
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
- iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ iwl_set_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/*
* Wait for clock stabilization; once stabilized, access to
* device-internal resources is supported, e.g. iwl_write_prph()
* and accesses to uCode SRAM.
*/
- ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+ ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
if (ret < 0) {
@@ -1047,14 +1009,14 @@ int iwl_apm_init(struct iwl_priv *priv)
* do not disable clocks. This preserves any hardware bits already
* set by default in "CLK_CTRL_REG" after reset.
*/
- iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ iwl_write_prph(bus(priv), APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
udelay(20);
/* Disable L1-Active */
- iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
+ iwl_set_bits_prph(bus(priv), APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- set_bit(STATUS_DEVICE_ENABLED, &priv->status);
+ set_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
out:
return ret;
@@ -1068,7 +1030,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
bool defer;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
if (priv->tx_power_user_lmt == tx_power && !force)
return 0;
@@ -1088,7 +1050,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
return -EINVAL;
}
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return -EIO;
/* scan complete and commit_rxon use tx_power_next value,
@@ -1096,7 +1058,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
priv->tx_power_next = tx_power;
/* do not set tx power when scanning or channel changing */
- defer = test_bit(STATUS_SCANNING, &priv->status) ||
+ defer = test_bit(STATUS_SCANNING, &priv->shrd->status) ||
memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
if (defer && !force) {
IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
@@ -1134,7 +1096,7 @@ void iwl_send_bt_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "BT coex %s\n",
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
- if (trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ if (iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
@@ -1147,24 +1109,20 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
};
if (flags & CMD_ASYNC)
- return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
+ return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
CMD_ASYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
else
- return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
+ return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
CMD_SYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
}
-void iwl_clear_isr_stats(struct iwl_priv *priv)
-{
- memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
-}
-
-int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
- const struct ieee80211_tx_queue_params *params)
+int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct iwl_priv *priv = hw->priv;
struct iwl_rxon_context *ctx;
@@ -1173,7 +1131,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
IWL_DEBUG_MAC80211(priv, "enter\n");
- if (!iwl_is_ready_rf(priv)) {
+ if (!iwl_is_ready_rf(priv->shrd)) {
IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
return -EIO;
}
@@ -1185,7 +1143,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
q = AC_NUM - 1 - queue;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->shrd->lock, flags);
/*
* MULTI-FIXME
@@ -1203,13 +1161,13 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
}
-int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
+int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
@@ -1231,7 +1189,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
struct ieee80211_vif *vif = ctx->vif;
int err;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/*
* This variable will be correct only when there's just
@@ -1262,7 +1220,8 @@ static int iwl_setup_interface(struct iwl_priv *priv,
return 0;
}
-int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -1273,9 +1232,13 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
viftype, vif->addr);
- mutex_lock(&priv->mutex);
+ cancel_delayed_work_sync(&priv->hw_roc_disable_work);
+
+ mutex_lock(&priv->shrd->mutex);
+
+ iwlagn_disable_roc(priv);
- if (!iwl_is_ready_rf(priv)) {
+ if (!iwl_is_ready_rf(priv->shrd)) {
IWL_WARN(priv, "Try to add interface when device not ready\n");
err = -EINVAL;
goto out;
@@ -1318,7 +1281,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
ctx->vif = NULL;
priv->iw_mode = NL80211_IFTYPE_STATION;
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
@@ -1330,7 +1293,7 @@ static void iwl_teardown_interface(struct iwl_priv *priv,
{
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
if (priv->scan_vif == vif) {
iwl_scan_cancel_timeout(priv, 200);
@@ -1354,7 +1317,7 @@ static void iwl_teardown_interface(struct iwl_priv *priv,
priv->bt_traffic_load = priv->last_bt_traffic_load;
}
-void iwl_mac_remove_interface(struct ieee80211_hw *hw,
+void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
@@ -1362,14 +1325,20 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- WARN_ON(ctx->vif != vif);
+ if (WARN_ON(ctx->vif != vif)) {
+ struct iwl_rxon_context *tmp;
+ IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
+ for_each_context(priv, tmp)
+ IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
+ tmp->ctxid, tmp, tmp->vif);
+ }
ctx->vif = NULL;
iwl_teardown_interface(priv, vif, false);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1393,7 +1362,7 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
{
u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
- if (iwl_debug_level & IWL_DL_TX) {
+ if (iwl_get_debug_level(priv->shrd) & IWL_DL_TX) {
if (!priv->tx_traffic) {
priv->tx_traffic =
kzalloc(traffic_size, GFP_KERNEL);
@@ -1401,7 +1370,7 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
return -ENOMEM;
}
}
- if (iwl_debug_level & IWL_DL_RX) {
+ if (iwl_get_debug_level(priv->shrd) & IWL_DL_RX) {
if (!priv->rx_traffic) {
priv->rx_traffic =
kzalloc(traffic_size, GFP_KERNEL);
@@ -1428,7 +1397,7 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
__le16 fc;
u16 len;
- if (likely(!(iwl_debug_level & IWL_DL_TX)))
+ if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_TX)))
return;
if (!priv->tx_traffic)
@@ -1452,7 +1421,7 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
__le16 fc;
u16 len;
- if (likely(!(iwl_debug_level & IWL_DL_RX)))
+ if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_RX)))
return;
if (!priv->rx_traffic)
@@ -1609,7 +1578,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
static void iwl_force_rf_reset(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return;
if (!iwl_is_any_associated(priv)) {
@@ -1634,7 +1603,7 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
{
struct iwl_force_reset *force_reset;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return -EINVAL;
if (mode >= IWL_MAX_FORCE_RESET) {
@@ -1680,8 +1649,9 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
return 0;
}
-int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
+int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
{
struct iwl_priv *priv = hw->priv;
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
@@ -1691,11 +1661,13 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 interface_modes;
int err;
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
newtype = ieee80211_iftype_p2p(newtype, newp2p);
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
- if (!ctx->vif || !iwl_is_ready_rf(priv)) {
+ if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
/*
* Huh? But wait ... this can maybe happen when
* we're in the middle of a firmware restart!
@@ -1757,36 +1729,45 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
err = 0;
out:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
return err;
}
-/*
- * On every watchdog tick we check (latest) time stamp. If it does not
- * change during timeout period and queue is not empty we reset firmware.
- */
-static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
+int iwl_cmd_echo_test(struct iwl_priv *priv)
{
- struct iwl_tx_queue *txq = &priv->txq[cnt];
- struct iwl_queue *q = &txq->q;
- unsigned long timeout;
int ret;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_ECHO,
+ .flags = CMD_SYNC,
+ };
- if (q->read_ptr == q->write_ptr) {
- txq->time_stamp = jiffies;
- return 0;
- }
-
- timeout = txq->time_stamp +
- msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ if (ret)
+ IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
+ else
+ IWL_DEBUG_INFO(priv, "echo testing pass\n");
+ return ret;
+}
- if (time_after(jiffies, timeout)) {
- IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
- q->id, priv->cfg->base_params->wd_timeout);
+static inline int iwl_check_stuck_queue(struct iwl_priv *priv, int txq)
+{
+ if (iwl_trans_check_stuck_queue(trans(priv), txq)) {
+ int ret;
+ if (txq == priv->shrd->cmd_queue) {
+ /*
+ * validate command queue still working
+ * by sending "ECHO" command
+ */
+ if (!iwl_cmd_echo_test(priv))
+ return 0;
+ else
+ IWL_DEBUG_HC(priv, "echo testing fail\n");
+ }
ret = iwl_force_reset(priv, IWL_FW_RESET, false);
return (ret == -EAGAIN) ? 0 : 1;
}
-
return 0;
}
@@ -1806,7 +1787,10 @@ void iwl_bg_watchdog(unsigned long data)
int cnt;
unsigned long timeout;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ return;
+
+ if (iwl_is_rfkill(priv->shrd))
return;
timeout = priv->cfg->base_params->wd_timeout;
@@ -1814,14 +1798,14 @@ void iwl_bg_watchdog(unsigned long data)
return;
/* monitor and check for stuck cmd queue */
- if (iwl_check_stuck_queue(priv, priv->cmd_queue))
+ if (iwl_check_stuck_queue(priv, priv->shrd->cmd_queue))
return;
/* monitor and check for other stuck queues */
if (iwl_is_any_associated(priv)) {
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) {
/* skip as we already checked the command queue */
- if (cnt == priv->cmd_queue)
+ if (cnt == priv->shrd->cmd_queue)
continue;
if (iwl_check_stuck_queue(priv, cnt))
return;
@@ -1843,6 +1827,28 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
del_timer(&priv->watchdog);
}
+/**
+ * iwl_beacon_time_mask_low - mask of the lower 32 bits of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
+ u16 tsf_bits)
+{
+ return (1 << tsf_bits) - 1;
+}
+
+/**
+ * iwl_beacon_time_mask_high - mask of the higher 32 bits of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
+ u16 tsf_bits)
+{
+ return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
+
/*
* extended beacon time format
* time in usec will be changed into a 32-bit value in extended:internal format
@@ -1859,13 +1865,12 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
return 0;
quot = (usec / interval) &
- (iwl_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits) >>
- priv->hw_params.beacon_time_tsf_bits);
+ (iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
+ IWLAGN_EXT_BEACON_TIME_POS);
rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
+ IWLAGN_EXT_BEACON_TIME_POS);
- return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
+ return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
}
/* base is usually what we get from ucode with each received frame,
@@ -1875,64 +1880,80 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
u32 addon, u32 beacon_interval)
{
u32 base_low = base & iwl_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
+ IWLAGN_EXT_BEACON_TIME_POS);
u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
+ IWLAGN_EXT_BEACON_TIME_POS);
u32 interval = beacon_interval * TIME_UNIT;
u32 res = (base & iwl_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits)) +
+ IWLAGN_EXT_BEACON_TIME_POS)) +
(addon & iwl_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits));
+ IWLAGN_EXT_BEACON_TIME_POS));
if (base_low > addon_low)
res += base_low - addon_low;
else if (base_low < addon_low) {
res += interval + base_low - addon_low;
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
+ res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
} else
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
+ res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
return cpu_to_le32(res);
}
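The two helpers above pack a microsecond timestamp into one 32-bit word: the upper (32 - tsf_bits) bits count whole beacon intervals and the lower tsf_bits bits carry the remainder in usec. A small self-contained sketch of that arithmetic; TSF_BITS below is a placeholder standing in for IWLAGN_EXT_BEACON_TIME_POS, whose actual value is defined elsewhere in the driver.

    #include <stdio.h>
    #include <stdint.h>

    #define TIME_UNIT 1024
    #define TSF_BITS  22    /* placeholder for IWLAGN_EXT_BEACON_TIME_POS */

    static uint32_t mask_low(void)  { return (1u << TSF_BITS) - 1; }
    static uint32_t mask_high(void) { return ((1u << (32 - TSF_BITS)) - 1) << TSF_BITS; }

    /* Mirrors iwl_usecs_to_beacons(): quotient of beacon intervals in the
     * high bits, remainder in microseconds in the low bits. */
    static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval)
    {
        uint32_t interval = beacon_interval * TIME_UNIT;
        uint32_t quot, rem;

        if (!interval || !usec)
            return 0;

        quot = (usec / interval) & (mask_high() >> TSF_BITS);
        rem  = (usec % interval) & mask_low();

        return (quot << TSF_BITS) + rem;
    }

    int main(void)
    {
        /* A 100 TU beacon interval is 102400 usec, so 250000 usec is
         * 2 whole intervals plus a 45200 usec remainder. */
        uint32_t v = usecs_to_beacons(250000, 100);
        printf("quot=%u rem=%u\n",
               (unsigned int)(v >> TSF_BITS), (unsigned int)(v & mask_low()));
        return 0;
    }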
-#ifdef CONFIG_PM
+void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctx,
+ u8 sta_id, u8 tid)
+{
+ struct ieee80211_vif *vif;
+ u8 *addr = priv->stations[sta_id].sta.sta.addr;
-int iwl_suspend(struct iwl_priv *priv)
+ if (ctx == NUM_IWL_RXON_CTX)
+ ctx = priv->stations[sta_id].ctxid;
+ vif = priv->contexts[ctx].vif;
+
+ ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+}
+
+void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctx,
+ u8 sta_id, u8 tid)
{
- /*
- * This function is called when system goes into suspend state
- * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
- * first but since iwl_mac_stop() has no knowledge of who the caller is,
- * it will not call apm_ops.stop() to stop the DMA operation.
- * Calling apm_ops.stop here to make sure we stop the DMA.
- *
- * But of course ... if we have configured WoWLAN then we did other
- * things already :-)
- */
- if (!priv->wowlan)
- iwl_apm_stop(priv);
+ struct ieee80211_vif *vif;
+ u8 *addr = priv->stations[sta_id].sta.sta.addr;
- return 0;
+ if (ctx == NUM_IWL_RXON_CTX)
+ ctx = priv->stations[sta_id].ctxid;
+ vif = priv->contexts[ctx].vif;
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
}
-int iwl_resume(struct iwl_priv *priv)
+void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state)
{
- bool hw_rfkill = false;
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
+}
- iwl_enable_interrupts(priv);
+void iwl_nic_config(struct iwl_priv *priv)
+{
+ priv->cfg->lib->nic_config(priv);
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rfkill = true;
+}
- if (hw_rfkill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
+void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info;
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
+ info = IEEE80211_SKB_CB(skb);
+ kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));
+ dev_kfree_skb_any(skb);
+}
- return 0;
+void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac)
+{
+ ieee80211_stop_queue(priv->hw, ac);
}
-#endif /* CONFIG_PM */
+void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac)
+{
+ ieee80211_wake_queue(priv->hw, ac);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 02817a43855..137da338070 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -64,6 +64,7 @@
#define __iwl_core_h__
#include "iwl-dev.h"
+#include "iwl-io.h"
/************************
* forward declarations *
@@ -71,15 +72,8 @@
struct iwl_host_cmd;
struct iwl_cmd;
-
-#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR "<ilw@linux.intel.com>"
-
#define TIME_UNIT 1024
-#define IWL_CMD(x) case x: return #x
-
struct iwl_lib_ops {
/* set hw dependent parameters */
int (*set_hw_params)(struct iwl_priv *priv);
@@ -101,23 +95,6 @@ struct iwl_lib_ops {
void (*temperature)(struct iwl_priv *priv);
};
-struct iwl_mod_params {
- int sw_crypto; /* def: 0 = using hardware encryption */
- int num_of_queues; /* def: HW dependent */
- int disable_11n; /* def: 0 = 11n capabilities enabled */
- int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
- int antenna; /* def: 0 = both antennas (use diversity) */
- int restart_fw; /* def: 1 = restart firmware */
- bool plcp_check; /* def: true = enable plcp health check */
- bool ack_check; /* def: false = disable ack health check */
- bool wd_disable; /* def: false = enable stuck queue check */
- bool bt_coex_active; /* def: true = enable bt coex */
- int led_mode; /* def: 0 = system default */
- bool no_sleep_autoadjust; /* def: true = disable autoadjust */
- bool power_save; /* def: false = disable power save */
- int power_level; /* def: 1 = power level */
-};
-
/*
* @max_ll_items: max number of OTP blocks
* @shadow_ram_support: shadow support for OTP memory
@@ -132,10 +109,10 @@ struct iwl_mod_params {
* radio tuning when there is a high receiving plcp error rate
* @chain_noise_scale: default chain noise scale used for gain computation
* @wd_timeout: TX queues watchdog timeout
- * @temperature_kelvin: temperature report by uCode in kelvin
* @max_event_log_size: size of event log buffer size for ucode event logging
 * @shadow_reg_enable: HW shadow register bit
* @no_idle_support: do not support idle mode
+ * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
*/
struct iwl_base_params {
int eeprom_size;
@@ -147,17 +124,16 @@ struct iwl_base_params {
const u16 max_ll_items;
const bool shadow_ram_support;
u16 led_compensation;
- int chain_noise_num_beacons;
bool adv_thermal_throttle;
bool support_ct_kill_exit;
const bool support_wimax_coexist;
u8 plcp_delta_threshold;
s32 chain_noise_scale;
unsigned int wd_timeout;
- bool temperature_kelvin;
u32 max_event_log_size;
const bool shadow_reg_enable;
const bool no_idle_support;
+ const bool hd_v2;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
@@ -194,6 +170,8 @@ struct iwl_ht_params {
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
* @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_ok: oldest version of the uCode API that is OK to load
+ * without a warning, for use in transitions
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @valid_tx_ant: valid transmit antenna
* @valid_rx_ant: valid receive antenna
@@ -214,20 +192,12 @@ struct iwl_ht_params {
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
* @iq_invert: I/Q inversion
+ * @temp_offset_v2: support v2 of temperature offset calibration
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
* highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version. The firmware's API version will be
- * stored in @iwl_priv, enabling the driver to make runtime changes based
- * on firmware version used.
- *
- * For example,
- * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- * Driver interacts with Firmware API version >= 2.
- * } else {
- * Driver interacts with Firmware API version 1.
- * }
+ * it has a supported API version.
*
* The ideal usage of this infrastructure is to treat a new ucode API
* release as a new hardware revision.
@@ -237,6 +207,7 @@ struct iwl_cfg {
const char *name;
const char *fw_name_pre;
const unsigned int ucode_api_max;
+ const unsigned int ucode_api_ok;
const unsigned int ucode_api_min;
u8 valid_tx_ant;
u8 valid_rx_ant;
@@ -259,15 +230,17 @@ struct iwl_cfg {
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
const bool iq_invert;
+ const bool temp_offset_v2;
};
/***************************
* L i b *
***************************/
-int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params);
-int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
+int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int hw_decrypt);
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -287,18 +260,17 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
-void iwl_irq_handle_error(struct iwl_priv *priv);
-int iwl_mac_add_interface(struct ieee80211_hw *hw,
+int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-void iwl_mac_remove_interface(struct ieee80211_hw *hw,
+void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-int iwl_mac_change_interface(struct ieee80211_hw *hw,
+int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
+int iwl_cmd_echo_test(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
void iwl_free_traffic_mem(struct iwl_priv *priv);
-void iwl_reset_traffic_log(struct iwl_priv *priv);
void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
u16 length, struct ieee80211_hdr *header);
void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
@@ -349,9 +321,9 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
******************************************************************************/
void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
-int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
void iwl_force_scan_end(struct iwl_priv *priv);
-int iwl_mac_hw_scan(struct ieee80211_hw *hw,
+int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
@@ -359,12 +331,6 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
const u8 *ta, const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
-u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes);
-u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif);
void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
int __must_check iwl_scan_initiate(struct iwl_priv *priv,
@@ -387,117 +353,21 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
* S e n d i n g H o s t C o m m a n d s *
*****************************************************/
-const char *get_cmd_string(u8 cmd);
void iwl_bg_watchdog(unsigned long data);
u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
u32 addon, u32 beacon_interval);
-#ifdef CONFIG_PM
-int iwl_suspend(struct iwl_priv *priv);
-int iwl_resume(struct iwl_priv *priv);
-#endif /* !CONFIG_PM */
-
-int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg);
-void __devexit iwl_remove(struct iwl_priv * priv);
-
-/*****************************************************
-* Error Handling Debugging
-******************************************************/
-void iwl_dump_nic_error_log(struct iwl_priv *priv);
-int iwl_dump_nic_event_log(struct iwl_priv *priv,
- bool full_log, char **buf, bool display);
-void iwl_dump_csr(struct iwl_priv *priv);
-int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
-#ifdef CONFIG_IWLWIFI_DEBUG
-void iwl_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-#else
-static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
-}
-#endif
-
-void iwl_clear_isr_stats(struct iwl_priv *priv);
/*****************************************************
* GEOS
******************************************************/
-int iwlcore_init_geos(struct iwl_priv *priv);
-void iwlcore_free_geos(struct iwl_priv *priv);
-
-/*************** DRIVER STATUS FUNCTIONS *****/
-
-#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
-/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
-#define STATUS_INT_ENABLED 2
-#define STATUS_RF_KILL_HW 3
-#define STATUS_CT_KILL 4
-#define STATUS_INIT 5
-#define STATUS_ALIVE 6
-#define STATUS_READY 7
-#define STATUS_TEMPERATURE 8
-#define STATUS_GEO_CONFIGURED 9
-#define STATUS_EXIT_PENDING 10
-#define STATUS_STATISTICS 12
-#define STATUS_SCANNING 13
-#define STATUS_SCAN_ABORTING 14
-#define STATUS_SCAN_HW 15
-#define STATUS_POWER_PMI 16
-#define STATUS_FW_ERROR 17
-#define STATUS_DEVICE_ENABLED 18
-#define STATUS_CHANNEL_SWITCH_PENDING 19
-
-
-static inline int iwl_is_ready(struct iwl_priv *priv)
-{
- /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
- * set but EXIT_PENDING is not */
- return test_bit(STATUS_READY, &priv->status) &&
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
- !test_bit(STATUS_EXIT_PENDING, &priv->status);
-}
-
-static inline int iwl_is_alive(struct iwl_priv *priv)
-{
- return test_bit(STATUS_ALIVE, &priv->status);
-}
-
-static inline int iwl_is_init(struct iwl_priv *priv)
-{
- return test_bit(STATUS_INIT, &priv->status);
-}
-
-static inline int iwl_is_rfkill_hw(struct iwl_priv *priv)
-{
- return test_bit(STATUS_RF_KILL_HW, &priv->status);
-}
-
-static inline int iwl_is_rfkill(struct iwl_priv *priv)
-{
- return iwl_is_rfkill_hw(priv);
-}
-
-static inline int iwl_is_ctkill(struct iwl_priv *priv)
-{
- return test_bit(STATUS_CT_KILL, &priv->status);
-}
-
-static inline int iwl_is_ready_rf(struct iwl_priv *priv)
-{
-
- if (iwl_is_rfkill(priv))
- return 0;
-
- return iwl_is_ready(priv);
-}
+int iwl_init_geos(struct iwl_priv *priv);
+void iwl_free_geos(struct iwl_priv *priv);
extern void iwl_send_bt_config(struct iwl_priv *priv);
extern int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
-void iwl_apm_stop(struct iwl_priv *priv);
-int iwl_apm_init(struct iwl_priv *priv);
int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -513,9 +383,12 @@ static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
priv->cfg->bt_params->advanced_bt_coexist;
}
-extern bool bt_siso_mode;
-
+static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
+{
+ IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
+ iwl_write32(bus(priv), CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
-void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand);
+extern bool bt_siso_mode;
#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index d6dbb042304..b9f3267e720 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -439,4 +439,22 @@
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
+/**********************************************************
+ * CSR values
+ **********************************************************/
+ /*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+
#endif /* !__iwl_csr_h__ */
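The comment block above defines the coalescing timer in 32-usec units, with defaults of 0x40 and 0x10. A trivial self-contained check of that arithmetic (illustrative only; it reuses the two default values, not driver code):

    #include <stdio.h>

    #define IWL_HOST_INT_TIMEOUT_DEF       (0x40)
    #define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)

    int main(void)
    {
        /* CSR_INT_COALESCING counts in 32-usec units, so the defaults are
         * 64 * 32 = 2048 usec and 16 * 32 = 512 usec, matching the comment. */
        printf("coalescing timeout: %d usec\n", IWL_HOST_INT_TIMEOUT_DEF * 32);
        printf("calib coalescing timeout: %d usec\n",
               IWL_HOST_INT_CALIB_TIMEOUT_DEF * 32);
        return 0;
    }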
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index f9a407e40af..69a77e24d22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -29,50 +29,51 @@
#ifndef __iwl_debug_h__
#define __iwl_debug_h__
+#include "iwl-bus.h"
+#include "iwl-shared.h"
+
struct iwl_priv;
-extern u32 iwl_debug_level;
-#define IWL_ERR(p, f, a...) dev_err(p->bus->dev, f, ## a)
-#define IWL_WARN(p, f, a...) dev_warn(p->bus->dev, f, ## a)
-#define IWL_INFO(p, f, a...) dev_info(p->bus->dev, f, ## a)
-#define IWL_CRIT(p, f, a...) dev_crit(p->bus->dev, f, ## a)
+/* No matter what m is (priv, bus or trans), this will work */
+#define IWL_ERR(m, f, a...) dev_err(bus(m)->dev, f, ## a)
+#define IWL_WARN(m, f, a...) dev_warn(bus(m)->dev, f, ## a)
+#define IWL_INFO(m, f, a...) dev_info(bus(m)->dev, f, ## a)
+#define IWL_CRIT(m, f, a...) dev_crit(bus(m)->dev, f, ## a)
-#define iwl_print_hex_error(priv, p, len) \
+#define iwl_print_hex_error(m, p, len) \
do { \
print_hex_dump(KERN_ERR, "iwl data: ", \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
#ifdef CONFIG_IWLWIFI_DEBUG
-#define IWL_DEBUG(__priv, level, fmt, args...) \
+#define IWL_DEBUG(m, level, fmt, args...) \
do { \
- if (iwl_get_debug_level(__priv) & (level)) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
+ if (iwl_get_debug_level((m)->shrd) & (level)) \
+ dev_printk(KERN_ERR, bus(m)->dev, \
"%c %s " fmt, in_interrupt() ? 'I' : 'U', \
__func__ , ## args); \
} while (0)
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
do { \
- if ((iwl_get_debug_level(__priv) & (level)) && net_ratelimit()) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
+ if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\
+ dev_printk(KERN_ERR, bus(m)->dev, \
"%c %s " fmt, in_interrupt() ? 'I' : 'U', \
__func__ , ## args); \
} while (0)
-#define iwl_print_hex_dump(priv, level, p, len) \
+#define iwl_print_hex_dump(m, level, p, len) \
do { \
- if (iwl_get_debug_level(priv) & level) \
+ if (iwl_get_debug_level((m)->shrd) & level) \
print_hex_dump(KERN_DEBUG, "iwl data: ", \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
#else
-#define IWL_DEBUG(__priv, level, fmt, args...)
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
-static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
- const void *p, u32 len)
-{}
+#define IWL_DEBUG(m, level, fmt, args...)
+#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
+#define iwl_print_hex_dump(m, level, p, len)
#endif /* CONFIG_IWLWIFI_DEBUG */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -104,10 +105,12 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
*
* The active debug levels can be accessed via files
*
- * /sys/module/iwlagn/parameters/debug{50}
- * /sys/class/net/wlan0/device/debug_level
- *
+ * /sys/module/iwlwifi/parameters/debug
* when CONFIG_IWLWIFI_DEBUG=y.
+ *
+ * /sys/kernel/debug/phy0/iwlwifi/debug/debug_level
+ * when CONFIG_IWLWIFI_DEBUGFS=y.
+ *
*/
/* 0x0000000F - 0x00000001 */
@@ -166,6 +169,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
+#define IWL_DEBUG_FW_ERRORS(p, f, a...) IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a)
#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
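After this change the debug macros accept any object m that exposes a shrd pointer and a bus() accessor (priv, bus or trans), and every message is gated by a shared debug-level bitmask. Below is a self-contained mock of that gating pattern; the macro and level names are invented, not the driver's.

    #include <stdio.h>

    /* Invented level bits mirroring the IWL_DL_* idea. */
    #define DL_INFO 0x1
    #define DL_TX   0x100

    /* In the driver this would come from iwl_get_debug_level(m->shrd). */
    static unsigned int debug_level = DL_INFO;

    #define MY_DEBUG(level, fmt, ...)                                   \
        do {                                                            \
            if (debug_level & (level))                                  \
                fprintf(stderr, "%s " fmt, __func__, ##__VA_ARGS__);    \
        } while (0)

    int main(void)
    {
        MY_DEBUG(DL_INFO, "printed: the INFO bit is set (%d)\n", 1);
        MY_DEBUG(DL_TX, "suppressed: the TX bit is not set\n");
        return 0;
    }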
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index ec1485b2d3f..a1670e3f8bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -64,6 +64,14 @@
goto err; \
} while (0)
+#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_u32(#name, mode, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
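The DEBUGFS_ADD_U32 helper above follows the same shape as the existing DEBUGFS_ADD_* macros: try to create the entry, and jump to the caller's local err label on failure. A self-contained mock of that error-handling pattern; every name in it is invented for illustration.

    #include <stdio.h>
    #include <stdlib.h>

    /* Bails out to the caller's local "err" label when creation fails,
     * like DEBUGFS_ADD_U32 does. */
    #define ADD_ENTRY(name, val) do {                       \
        if (fake_create_entry(#name, (val)) != 0)           \
            goto err;                                       \
    } while (0)

    static int fake_create_entry(const char *name, unsigned int val)
    {
        printf("created %s = %u\n", name, val);
        return 0;   /* flip to non-zero to exercise the error path */
    }

    static int register_entries(void)
    {
        ADD_ENTRY(missed_beacon_threshold, 5);
        ADD_ENTRY(plcp_delta_threshold, 50);
        return 0;
    err:
        return -1;
    }

    int main(void)
    {
        return register_entries() ? EXIT_FAILURE : EXIT_SUCCESS;
    }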
/* file operation */
#define DEBUGFS_READ_FUNC(name) \
static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
@@ -254,7 +262,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
sram = priv->dbgfs_sram_offset & ~0x3;
/* read the first u32 from sram */
- val = iwl_read_targ_mem(priv, sram);
+ val = iwl_read_targ_mem(bus(priv), sram);
for (; len; len--) {
/* put the address at the start of every line */
@@ -273,7 +281,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
if (++offset == 4) {
sram += 4;
offset = 0;
- val = iwl_read_targ_mem(priv, sram);
+ val = iwl_read_targ_mem(bus(priv), sram);
}
/* put in extra spaces and split lines for human readability */
@@ -340,7 +348,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
{
struct iwl_priv *priv = file->private_data;
struct iwl_station_entry *station;
- int max_sta = priv->hw_params.max_stations;
+ struct iwl_tid_data *tid_data;
char *buf;
int i, j, pos = 0;
ssize_t ret;
@@ -354,7 +362,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
priv->num_stations);
- for (i = 0; i < max_sta; i++) {
+ for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
station = &priv->stations[i];
if (!station->used)
continue;
@@ -363,22 +371,18 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
i, station->sta.sta.addr,
station->sta.station_flags_msk);
pos += scnprintf(buf + pos, bufsz - pos,
- "TID\tseq_num\ttxq_id\tframes\ttfds\t");
- pos += scnprintf(buf + pos, bufsz - pos,
- "start_idx\tbitmap\t\t\trate_n_flags\n");
+ "TID\tseq_num\ttxq_id\ttfds\trate_n_flags\n");
- for (j = 0; j < MAX_TID_COUNT; j++) {
+ for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
+ tid_data = &priv->shrd->tid_data[i][j];
pos += scnprintf(buf + pos, bufsz - pos,
- "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
- j, station->tid[j].seq_number,
- station->tid[j].agg.txq_id,
- station->tid[j].agg.frame_count,
- station->tid[j].tfds_in_queue,
- station->tid[j].agg.start_idx,
- station->tid[j].agg.bitmap,
- station->tid[j].agg.rate_n_flags);
-
- if (station->tid[j].agg.wait_for_ba)
+ "%d:\t%#x\t%#x\t%u\t%#x",
+ j, tid_data->seq_number,
+ tid_data->agg.txq_id,
+ tid_data->tfds_in_queue,
+ tid_data->agg.rate_n_flags);
+
+ if (tid_data->agg.wait_for_ba)
pos += scnprintf(buf + pos, bufsz - pos,
" - waitforba");
pos += scnprintf(buf + pos, bufsz - pos, "\n");
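
The stations dump above, like the other readers in this file, builds its text by repeatedly advancing pos by the return value of scnprintf() into one preallocated buffer before handing the result to simple_read_from_buffer(). Below is a minimal user-space model of that accumulation pattern, with a local stand-in for scnprintf() (the kernel helper returns the number of characters actually stored, never more than size - 1); the tid values are invented for the demo.

#include <stdio.h>
#include <stdarg.h>

/* user-space stand-in mimicking the kernel scnprintf() return convention */
static int demo_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (n < 0)
		return 0;
	return n >= (int)size ? (int)size - 1 : n;
}

int main(void)
{
	char buf[128];
	int pos = 0;
	const size_t bufsz = sizeof(buf);
	int tid;

	pos += demo_scnprintf(buf + pos, bufsz - pos, "TID\tseq_num\ttxq_id\n");
	for (tid = 0; tid < 3; tid++)           /* fake per-TID values */
		pos += demo_scnprintf(buf + pos, bufsz - pos,
				      "%d:\t%#x\t%#x\n", tid, 0x10 * tid, tid);

	fwrite(buf, 1, pos, stdout);  /* plays the simple_read_from_buffer() role */
	return 0;
}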
@@ -442,46 +446,6 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return ret;
}
-static ssize_t iwl_dbgfs_log_event_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- ssize_t ret = -ENOMEM;
-
- ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
- if (buf) {
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- }
- return ret;
-}
-
-static ssize_t iwl_dbgfs_log_event_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- u32 event_log_flag;
- char buf[8];
- int buf_size;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &event_log_flag) != 1)
- return -EFAULT;
- if (event_log_flag == 1)
- iwl_dump_nic_event_log(priv, true, NULL, false);
-
- return count;
-}
-
-
-
static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -492,7 +456,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
char *buf;
ssize_t ret;
- if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
+ if (!test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -562,45 +526,46 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
- test_bit(STATUS_HCMD_ACTIVE, &priv->status));
+ test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
- test_bit(STATUS_INT_ENABLED, &priv->status));
+ test_bit(STATUS_INT_ENABLED, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
- test_bit(STATUS_RF_KILL_HW, &priv->status));
+ test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
- test_bit(STATUS_CT_KILL, &priv->status));
+ test_bit(STATUS_CT_KILL, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
- test_bit(STATUS_INIT, &priv->status));
+ test_bit(STATUS_INIT, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
- test_bit(STATUS_ALIVE, &priv->status));
+ test_bit(STATUS_ALIVE, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
- test_bit(STATUS_READY, &priv->status));
+ test_bit(STATUS_READY, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
- test_bit(STATUS_TEMPERATURE, &priv->status));
+ test_bit(STATUS_TEMPERATURE, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
- test_bit(STATUS_GEO_CONFIGURED, &priv->status));
+ test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
- test_bit(STATUS_EXIT_PENDING, &priv->status));
+ test_bit(STATUS_EXIT_PENDING, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
- test_bit(STATUS_STATISTICS, &priv->status));
+ test_bit(STATUS_STATISTICS, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
- test_bit(STATUS_SCANNING, &priv->status));
+ test_bit(STATUS_SCANNING, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
- test_bit(STATUS_SCAN_ABORTING, &priv->status));
+ test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
- test_bit(STATUS_SCAN_HW, &priv->status));
+ test_bit(STATUS_SCAN_HW, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
- test_bit(STATUS_POWER_PMI, &priv->status));
+ test_bit(STATUS_POWER_PMI, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
- test_bit(STATUS_FW_ERROR, &priv->status));
+ test_bit(STATUS_FW_ERROR, &priv->shrd->status));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
struct iwl_priv *priv = file->private_data;
+
int pos = 0;
int cnt = 0;
char *buf;
@@ -613,61 +578,25 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
return -ENOMEM;
}
- pos += scnprintf(buf + pos, bufsz - pos,
- "Interrupt Statistics Report:\n");
-
- pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
- priv->isr_stats.hw);
- pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
- priv->isr_stats.sw);
- if (priv->isr_stats.sw || priv->isr_stats.hw) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tLast Restarting Code: 0x%X\n",
- priv->isr_stats.err_code);
- }
-#ifdef CONFIG_IWLWIFI_DEBUG
- pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
- priv->isr_stats.sch);
- pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
- priv->isr_stats.alive);
-#endif
- pos += scnprintf(buf + pos, bufsz - pos,
- "HW RF KILL switch toggled:\t %u\n",
- priv->isr_stats.rfkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
- priv->isr_stats.ctkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
- priv->isr_stats.wakeup);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx command responses:\t\t %u\n",
- priv->isr_stats.rx);
for (cnt = 0; cnt < REPLY_MAX; cnt++) {
- if (priv->isr_stats.rx_handlers[cnt] > 0)
+ if (priv->rx_handlers_stats[cnt] > 0)
pos += scnprintf(buf + pos, bufsz - pos,
"\tRx handler[%36s]:\t\t %u\n",
get_cmd_string(cnt),
- priv->isr_stats.rx_handlers[cnt]);
+ priv->rx_handlers_stats[cnt]);
}
- pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
- priv->isr_stats.tx);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
- priv->isr_stats.unhandled);
-
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
-static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
+
char buf[8];
int buf_size;
u32 reset_flag;
@@ -679,7 +608,8 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
if (sscanf(buf, "%x", &reset_flag) != 1)
return -EFAULT;
if (reset_flag == 0)
- iwl_clear_isr_stats(priv);
+ memset(&priv->rx_handlers_stats[0], 0,
+ sizeof(priv->rx_handlers_stats));
return count;
}
@@ -784,6 +714,20 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_temperature_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n", priv->temperature);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+
static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -814,14 +758,14 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
return -EINVAL;
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return -EAGAIN;
priv->power_data.debug_sleep_level_override = value;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_power_update_mode(priv, true);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return count;
}
@@ -870,15 +814,15 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_FILE_OPS(wowlan_sram);
-DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
-DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_WRITE_FILE_OPS(rx_handlers);
DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_FILE_OPS(thermal_throttling);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+DEBUGFS_READ_FILE_OPS(temperature);
DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
DEBUGFS_READ_FILE_OPS(current_sleep_command);
@@ -889,36 +833,23 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
struct iwl_priv *priv = file->private_data;
int pos = 0, ofs = 0;
int cnt = 0, entry;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_rx_queue *rxq = &priv->rxq;
+
char *buf;
int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
- (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
+ (hw_params(priv).max_txq_num * 32 * 8) + 400;
const u8 *ptr;
ssize_t ret;
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate buffer\n");
return -ENOMEM;
}
- pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "q[%d]: read_ptr: %u, write_ptr: %u\n",
- cnt, q->read_ptr, q->write_ptr);
- }
- if (priv->tx_traffic && (iwl_debug_level & IWL_DL_TX)) {
+ if (priv->tx_traffic &&
+ (iwl_get_debug_level(priv->shrd) & IWL_DL_TX)) {
ptr = priv->tx_traffic;
pos += scnprintf(buf + pos, bufsz - pos,
- "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
+ "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
entry++, ofs += 16) {
@@ -933,15 +864,11 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
}
}
- pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "read: %u, write: %u\n",
- rxq->read, rxq->write);
-
- if (priv->rx_traffic && (iwl_debug_level & IWL_DL_RX)) {
+ if (priv->rx_traffic &&
+ (iwl_get_debug_level(priv->shrd) & IWL_DL_RX)) {
ptr = priv->rx_traffic;
pos += scnprintf(buf + pos, bufsz - pos,
- "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
+ "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
entry++, ofs += 16) {
@@ -982,76 +909,6 @@ static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
return count;
}
-static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- char *buf;
- int pos = 0;
- int cnt;
- int ret;
- const size_t bufsz = sizeof(char) * 64 *
- priv->cfg->base_params->num_of_queues;
-
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "hwq %.2d: read=%u write=%u stop=%d"
- " swq_id=%#.2x (ac %d/hwq %d)\n",
- cnt, q->read_ptr, q->write_ptr,
- !!test_bit(cnt, priv->queue_stopped),
- txq->swq_id, txq->swq_id & 3,
- (txq->swq_id >> 2) & 0x1f);
- if (cnt >= 4)
- continue;
- /* for the ACs, display the stop count too */
- pos += scnprintf(buf + pos, bufsz - pos,
- " stop-count: %d\n",
- atomic_read(&priv->queue_stop_count[cnt]));
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_rx_queue *rxq = &priv->rxq;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
- rxq->read);
- pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
- rxq->write);
- pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
- rxq->free_count);
- if (rxq->rb_stts) {
- pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
- } else {
- pos += scnprintf(buf + pos, bufsz - pos,
- "closed_rb_num: Not Allocated\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
static const char *fmt_value = " %-30s %10u\n";
static const char *fmt_hex = " %-30s 0x%02X\n";
static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
@@ -1096,7 +953,7 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
struct statistics_rx_non_phy *delta_general, *max_general;
struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1522,7 +1379,7 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
ssize_t ret;
struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1716,7 +1573,7 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
struct statistics_div *div, *accum_div, *delta_div, *max_div;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1829,16 +1686,16 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
ssize_t ret;
struct statistics_bt_activity *bt, *accum_bt;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EAGAIN;
if (!priv->bt_enable_flag)
return -EINVAL;
/* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
if (ret) {
IWL_ERR(priv,
@@ -1917,7 +1774,7 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
(sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
ssize_t ret;
- if (!iwl_is_alive(priv))
+ if (!iwl_is_alive(priv->shrd))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2199,7 +2056,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
const size_t bufsz = sizeof(buf);
u32 pwrsave_status;
- pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
+ pwrsave_status = iwl_read32(bus(priv), CSR_GP_CNTRL) &
CSR_GP_REG_POWER_SAVE_STATUS_MSK;
pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
@@ -2229,30 +2086,9 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
return -EFAULT;
/* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_send_statistics_request(priv, CMD_SYNC, true);
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static ssize_t iwl_dbgfs_csr_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int csr;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &csr) != 1)
- return -EFAULT;
-
- iwl_dump_csr(priv);
+ mutex_unlock(&priv->shrd->mutex);
return count;
}
@@ -2333,25 +2169,6 @@ static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- ret = pos = iwl_dump_fh(priv, &buf, true);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
-
- return ret;
-}
-
static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
@@ -2434,8 +2251,8 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
static ssize_t iwl_dbgfs_force_reset_read(struct file *file,
char __user *user_buf,
- size_t count, loff_t *ppos) {
-
+ size_t count, loff_t *ppos)
+{
struct iwl_priv *priv = file->private_data;
int i, pos = 0;
char buf[300];
@@ -2504,7 +2321,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
if (sscanf(buf, "%d", &flush) != 1)
return -EINVAL;
- if (iwl_is_rfkill(priv))
+ if (iwl_is_rfkill(priv->shrd))
return -EFAULT;
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
@@ -2514,8 +2331,8 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
+ size_t count, loff_t *ppos)
+{
struct iwl_priv *priv = file->private_data;
char buf[8];
int buf_size;
@@ -2626,11 +2443,26 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ iwl_cmd_echo_test(priv);
+ return count;
+}
+
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
-DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
@@ -2639,9 +2471,7 @@ DEBUGFS_READ_FILE_OPS(chain_noise);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
-DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
-DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
@@ -2653,6 +2483,53 @@ DEBUGFS_WRITE_FILE_OPS(wd_timeout);
DEBUGFS_READ_FILE_OPS(bt_traffic);
DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
DEBUGFS_READ_FILE_OPS(reply_tx_error);
+DEBUGFS_WRITE_FILE_OPS(echo_test);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+static ssize_t iwl_dbgfs_debug_level_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_shared *shrd = priv->shrd;
+ char buf[11];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "0x%.8x",
+ iwl_get_debug_level(shrd));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_debug_level_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_shared *shrd = priv->shrd;
+ char buf[11];
+ unsigned long val;
+ int ret;
+
+ if (count > sizeof(buf))
+ return -EINVAL;
+
+ memset(buf, 0, sizeof(buf));
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ shrd->dbg_level_dev = val;
+ if (iwl_alloc_traffic_mem(priv))
+ IWL_ERR(priv, "Not enough memory to generate traffic log\n");
+
+ return count;
+}
+DEBUGFS_READ_WRITE_FILE_OPS(debug_level);
+#endif /* CONFIG_IWLWIFI_DEBUG */
/*
* Create the debugfs files and directories
@@ -2682,26 +2559,23 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_handlers, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR);
+
DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
@@ -2710,7 +2584,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR);
-
DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
@@ -2719,12 +2592,20 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
if (iwl_advanced_bt_coexist(priv))
DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
+#ifdef CONFIG_IWLWIFI_DEBUG
+ DEBUGFS_ADD_FILE(debug_level, dir_debug, S_IRUSR | S_IWUSR);
+#endif
+
DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
&priv->disable_sens_cal);
DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
&priv->disable_chain_noise_cal);
+
+ if (iwl_trans_dbgfs_register(trans(priv), dir_debug))
+ goto err;
return 0;
err:
@@ -2745,6 +2626,3 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
debugfs_remove_recursive(priv->debugfs_dir);
priv->debugfs_dir = NULL;
}
-
-
-
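
The new debug_level debugfs entry added above prints the mask back as "0x%.8x" and accepts any base-prefixed value on write (strict_strtoul with base 0). A small user-space sketch of that round trip; demo_set()/demo_get() are illustrative stand-ins for the two file operations, not driver functions.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static unsigned long dbg_level_dev;     /* plays the role of shrd->dbg_level_dev */

/* write path: parse like strict_strtoul(buf, 0, &val) does */
static int demo_set(const char *buf)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(buf, &end, 0);        /* base 0: accepts 0x..., 0..., decimal */
	if (errno || end == buf)
		return -EINVAL;
	dbg_level_dev = val;
	return 0;
}

/* read path: format like the read handler's "0x%.8x" */
static void demo_get(char *out, size_t outsz)
{
	snprintf(out, outsz, "0x%.8lx", dbg_level_dev);
}

int main(void)
{
	char buf[16];

	if (demo_set("0x40") == 0) {        /* e.g. enable one debug bit */
		demo_get(buf, sizeof(buf));
		printf("debug_level now %s\n", buf);
	}
	return 0;
}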
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6c9790cac8d..6c00a447963 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -32,16 +32,15 @@
#define __iwl_dev_h__
#include <linux/interrupt.h>
-#include <linux/pci.h> /* for struct pci_device_id */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/leds.h>
+#include <linux/slab.h>
#include <net/ieee80211_radiotap.h>
#include "iwl-eeprom.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
-#include "iwl-fh.h"
#include "iwl-debug.h"
#include "iwl-agn-hw.h"
#include "iwl-led.h"
@@ -50,8 +49,7 @@
#include "iwl-agn-tt.h"
#include "iwl-bus.h"
#include "iwl-trans.h"
-
-#define DRV_NAME "iwlagn"
+#include "iwl-shared.h"
struct iwl_tx_queue;
@@ -90,109 +88,6 @@ struct iwl_tx_queue;
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U
-struct iwl_rx_mem_buffer {
- dma_addr_t page_dma;
- struct page *page;
- struct list_head list;
-};
-
-#define rxb_addr(r) page_address(r->page)
-
-/* defined below */
-struct iwl_device_cmd;
-
-struct iwl_cmd_meta {
- /* only for SYNC commands, iff the reply skb is wanted */
- struct iwl_host_cmd *source;
- /*
- * only for ASYNC commands
- * (which is somewhat stupid -- look at iwl-sta.c for instance
- * which duplicates a bunch of code because the callback isn't
- * invoked for SYNC commands, if it were and its result passed
- * through it would be simpler...)
- */
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
-
- u32 flags;
-
- DEFINE_DMA_UNMAP_ADDR(mapping);
- DEFINE_DMA_UNMAP_LEN(len);
-};
-
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues.
- *
- * Note the difference between n_bd and n_window: the hardware
- * always assumes 256 descriptors, so n_bd is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
- * the software buffers (in the variables @meta, @txb in struct
- * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
- * in the same struct) have 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_queue {
- int n_bd; /* number of BDs in this queue */
- int write_ptr; /* 1-st empty entry (index) host_w*/
- int read_ptr; /* last used entry (index) host_r*/
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
-
-/* One for each TFD */
-struct iwl_tx_info {
- struct sk_buff *skb;
- struct iwl_rxon_context *ctx;
-};
-
-/**
- * struct iwl_tx_queue - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @bd: base of circular buffer of TFDs
- * @cmd: array of command/TX buffer pointers
- * @meta: array of meta data for each command/tx buffer
- * @dma_addr_cmd: physical address of cmd/tx buffer array
- * @txb: array of per-TFD driver data
- * @time_stamp: time (in jiffies) of last read_ptr change
- * @need_update: indicates need to update read/write index
- * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
- *
- * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- */
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
-struct iwl_tx_queue {
- struct iwl_queue q;
- struct iwl_tfd *tfds;
- struct iwl_device_cmd **cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_info *txb;
- unsigned long time_stamp;
- u8 need_update;
- u8 sched_retry;
- u8 active;
- u8 swq_id;
-};
-
#define IWL_NUM_SCAN_RATES (2)
/*
@@ -222,20 +117,16 @@ struct iwl_channel_info {
u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
};
-#define IWL_TX_FIFO_BK 0 /* shared */
-#define IWL_TX_FIFO_BE 1
-#define IWL_TX_FIFO_VI 2 /* shared */
-#define IWL_TX_FIFO_VO 3
-#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
-#define IWL_TX_FIFO_BE_IPAN 4
-#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
-#define IWL_TX_FIFO_VO_IPAN 5
-#define IWL_TX_FIFO_UNUSED -1
-
-/* Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate the 4 standard TX queues, 1 command
- * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
-#define IWL_MIN_NUM_QUEUES 10
+/*
+ * Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate
+ * - 4 standard TX queues
+ * - the command queue
+ * - 4 PAN TX queues
+ * - the PAN multicast queue, and
+ * - the AUX (TX during scan dwell) queue.
+ */
+#define IWL_MIN_NUM_QUEUES 11
/*
* Command queue depends on iPAN support.
@@ -243,161 +134,20 @@ struct iwl_channel_info {
#define IWL_DEFAULT_CMD_QUEUE_NUM 4
#define IWL_IPAN_CMD_QUEUE_NUM 9
-/*
- * This queue number is required for proper operation
- * because the ucode will stop/start the scheduler as
- * required.
- */
-#define IWL_IPAN_MCAST_QUEUE 8
-
#define IEEE80211_DATA_LEN 2304
#define IEEE80211_4ADDR_LEN 30
#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
-enum {
- CMD_SYNC = 0,
- CMD_ASYNC = BIT(0),
- CMD_WANT_SKB = BIT(1),
- CMD_ON_DEMAND = BIT(2),
-};
-
-#define DEF_CMD_PAYLOAD_SIZE 320
-
-/**
- * struct iwl_device_cmd
- *
- * For allocation of the command and tx queues, this establishes the overall
- * size of the largest command we send to uCode, except for commands that
- * aren't fully copied and use other TFD space.
- */
-struct iwl_device_cmd {
- struct iwl_cmd_header hdr; /* uCode API */
- union {
- u32 flags;
- u8 val8;
- u16 val16;
- u32 val32;
- struct iwl_tx_cmd tx;
- struct iwl6000_channel_switch_cmd chswitch;
- u8 payload[DEF_CMD_PAYLOAD_SIZE];
- } __packed cmd;
-} __packed;
-
-#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
-
-#define IWL_MAX_CMD_TFDS 2
-
-enum iwl_hcmd_dataflag {
- IWL_HCMD_DFL_NOCOPY = BIT(0),
-};
-
-/**
- * struct iwl_host_cmd - Host command to the uCode
- * @data: array of chunks that composes the data of the host command
- * @reply_page: pointer to the page that holds the response to the host command
- * @callback:
- * @flags: can be CMD_* note CMD_WANT_SKB is incompatible withe CMD_ASYNC
- * @len: array of the lenths of the chunks in data
- * @dataflags:
- * @id: id of the host command
- */
-struct iwl_host_cmd {
- const void *data[IWL_MAX_CMD_TFDS];
- unsigned long reply_page;
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
- u32 flags;
- u16 len[IWL_MAX_CMD_TFDS];
- u8 dataflags[IWL_MAX_CMD_TFDS];
- u8 id;
-};
-
#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-/**
- * struct iwl_rx_queue - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @read: Shared index to newest available Rx buffer
- * @write: Shared index to oldest written Rx packet
- * @free_count: Number of pre-allocated buffers in rx_free
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
- * @need_update: flag to indicate we need to update read/write index
- * @rb_stts: driver's pointer to receive buffer status
- * @rb_stts_dma: bus address of receive buffer status
- *
- * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
- */
-struct iwl_rx_queue {
- __le32 *bd;
- dma_addr_t bd_dma;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
- u32 read;
- u32 write;
- u32 free_count;
- u32 write_actual;
- struct list_head rx_free;
- struct list_head rx_used;
- int need_update;
- struct iwl_rb_status *rb_stts;
- dma_addr_t rb_stts_dma;
- spinlock_t lock;
-};
-
#define IWL_SUPPORTED_RATES_IE_LEN 8
-#define MAX_TID_COUNT 9
-
#define IWL_INVALID_RATE 0xFF
#define IWL_INVALID_VALUE -1
-/**
- * struct iwl_ht_agg -- aggregation status while waiting for block-ack
- * @txq_id: Tx queue used for Tx attempt
- * @frame_count: # frames attempted by Tx command
- * @wait_for_ba: Expect block-ack before next Tx reply
- * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
- * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window
- * @bitmap1: High order, one bit for each frame pending ACK in Tx window
- * @rate_n_flags: Rate at which Tx was attempted
- *
- * If REPLY_TX indicates that aggregation was attempted, driver must wait
- * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
- * until block ack arrives.
- */
-struct iwl_ht_agg {
- u16 txq_id;
- u16 frame_count;
- u16 wait_for_ba;
- u16 start_idx;
- u64 bitmap;
- u32 rate_n_flags;
-#define IWL_AGG_OFF 0
-#define IWL_AGG_ON 1
-#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
-#define IWL_EMPTYING_HW_QUEUE_DELBA 3
- u8 state;
- u8 tx_fifo;
-};
-
-
-struct iwl_tid_data {
- u16 seq_number; /* agn only */
- u16 tfds_in_queue;
- struct iwl_ht_agg agg;
-};
-
union iwl_ht_rate_supp {
u16 rates;
struct {
@@ -448,16 +198,10 @@ struct iwl_qos_info {
*/
struct iwl_station_entry {
struct iwl_addsta_cmd sta;
- struct iwl_tid_data tid[MAX_TID_COUNT];
u8 used, ctxid;
struct iwl_link_quality_cmd *lq;
};
-struct iwl_station_priv_common {
- struct iwl_rxon_context *ctx;
- u8 sta_id;
-};
-
/*
* iwl_station_priv: Driver's private station information
*
@@ -466,12 +210,13 @@ struct iwl_station_priv_common {
* space.
*/
struct iwl_station_priv {
- struct iwl_station_priv_common common;
+ struct iwl_rxon_context *ctx;
struct iwl_lq_sta lq_sta;
atomic_t pending_frames;
bool client;
bool asleep;
u8 max_agg_bufsize;
+ u8 sta_id;
};
/**
@@ -564,11 +309,13 @@ enum iwl_ucode_tlv_type {
* @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
+ IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
};
struct iwl_ucode_tlv {
@@ -634,54 +381,6 @@ struct iwl_sensitivity_ranges {
#define CELSIUS_TO_KELVIN(x) ((x)+273)
-/**
- * struct iwl_hw_params
- * @max_txq_num: Max # Tx queues supported
- * @scd_bc_tbls_size: size of scheduler byte count tables
- * @tfd_size: TFD size
- * @tx/rx_chains_num: Number of TX/RX chains
- * @valid_tx/rx_ant: usable antennas
- * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
- * @max_rxq_log: Log-base-2 of max_rxq_size
- * @rx_page_order: Rx buffer page order
- * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
- * @max_stations:
- * @ht40_channel: is 40MHz width possible in band 2.4
- * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
- * @sw_crypto: 0 for hw, 1 for sw
- * @max_xxx_size: for ucode uses
- * @ct_kill_threshold: temperature threshold
- * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
- * @calib_init_cfg: setup initial calibrations for the hw
- * @calib_rt_cfg: setup runtime calibrations for the hw
- * @struct iwl_sensitivity_ranges: range of sensitivity values
- */
-struct iwl_hw_params {
- u8 max_txq_num;
- u16 scd_bc_tbls_size;
- u32 tfd_size;
- u8 tx_chains_num;
- u8 rx_chains_num;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u16 max_rxq_size;
- u16 max_rxq_log;
- u32 rx_page_order;
- u8 max_stations;
- u8 ht40_channel;
- u8 max_beacon_itrvl; /* in 1024 ms */
- u32 max_inst_size;
- u32 max_data_size;
- u32 ct_kill_threshold; /* value in hw-dependent units */
- u32 ct_kill_exit_threshold; /* value in hw-dependent units */
- /* for 1000, 6000 series and up */
- u16 beacon_time_tsf_bits;
- u32 calib_init_cfg;
- u32 calib_rt_cfg;
- const struct iwl_sensitivity_ranges *sens;
-};
-
-
/******************************************************************************
*
* Functions implemented in core module which are forward declared here
@@ -697,35 +396,12 @@ struct iwl_hw_params {
****************************************************************************/
extern void iwl_update_chain_flags(struct iwl_priv *priv);
extern const u8 iwl_bcast_addr[ETH_ALEN];
-extern int iwl_queue_space(const struct iwl_queue *q);
-static inline int iwl_queue_used(const struct iwl_queue *q, int i)
-{
- return q->write_ptr >= q->read_ptr ?
- (i >= q->read_ptr && i < q->write_ptr) :
- !(i < q->read_ptr && i >= q->write_ptr);
-}
-
-
-static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
-{
- return index & (q->n_window - 1);
-}
-
-
-struct iwl_dma_ptr {
- dma_addr_t dma;
- void *addr;
- size_t size;
-};
#define IWL_OPERATION_MODE_AUTO 0
#define IWL_OPERATION_MODE_HT_ONLY 1
#define IWL_OPERATION_MODE_MIXED 2
#define IWL_OPERATION_MODE_20MHZ 3
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
/* Sensitivity and chain noise calibration */
@@ -849,9 +525,6 @@ struct iwl_chain_noise_data {
#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-#define IWL_TRAFFIC_ENTRIES (256)
-#define IWL_TRAFFIC_ENTRY_SIZE (64)
-
enum {
MEASUREMENT_READY = (1 << 0),
MEASUREMENT_ACTIVE = (1 << 1),
@@ -884,22 +557,6 @@ enum iwl_pa_type {
IWL_PA_INTERNAL = 1,
};
-/* interrupt statistics */
-struct isr_statistics {
- u32 hw;
- u32 sw;
- u32 err_code;
- u32 sch;
- u32 alive;
- u32 rfkill;
- u32 ctkill;
- u32 wakeup;
- u32 rx;
- u32 rx_handlers[REPLY_MAX];
- u32 tx;
- u32 unhandled;
-};
-
/* reply_tx_statistics (for _agn devices) */
struct reply_tx_error_statistics {
u32 pp_delay;
@@ -1009,21 +666,6 @@ struct iwl_event_log {
};
/*
- * host interrupt timeout value
- * used with setting interrupt coalescing timer
- * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
- *
- * default interrupt coalescing timer is 64 x 32 = 2048 usecs
- * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
- */
-#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
-#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
-
-/*
* This is the threshold value of plcp error rate per 100mSecs. It is
* used to set and check for the validity of plcp_delta.
*/
@@ -1101,20 +743,9 @@ struct iwl_notification_wait {
bool triggered, aborted;
};
-enum iwl_rxon_context_id {
- IWL_RXON_CTX_BSS,
- IWL_RXON_CTX_PAN,
-
- NUM_IWL_RXON_CTX
-};
-
struct iwl_rxon_context {
struct ieee80211_vif *vif;
- const u8 *ac_to_fifo;
- const u8 *ac_to_queue;
- u8 mcast_queue;
-
/*
* We could use the vif to indicate active, but we
* also need it to be active during disabling when
@@ -1162,13 +793,16 @@ struct iwl_rxon_context {
u8 extension_chan_offset;
} ht;
+ u8 bssid[ETH_ALEN];
+ bool preauth_bssid;
+
bool last_tx_rejected;
};
enum iwl_scan_type {
IWL_SCAN_NORMAL,
IWL_SCAN_RADIO_RESET,
- IWL_SCAN_OFFCH_TX,
+ IWL_SCAN_ROC,
};
enum iwlagn_ucode_type {
@@ -1190,24 +824,26 @@ struct iwl_testmode_trace {
};
#endif
-/* uCode ownership */
-#define IWL_OWNERSHIP_DRIVER 0
-#define IWL_OWNERSHIP_TM 1
-
struct iwl_priv {
+ /* data shared among all the driver's layers */
+ struct iwl_shared _shrd;
+ struct iwl_shared *shrd;
+
/* ieee device used by generic ieee processing code */
struct ieee80211_hw *hw;
struct ieee80211_channel *ieee_channels;
struct ieee80211_rate *ieee_rates;
+ struct kmem_cache *tx_cmd_pool;
struct iwl_cfg *cfg;
enum ieee80211_band band;
void (*pre_rx_handler)(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
- void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+ int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
@@ -1225,6 +861,9 @@ struct iwl_priv {
/* jiffies when last recovery from statistics was performed */
unsigned long rx_statistics_jiffies;
+ /* counters */
+ u32 rx_handlers_stats[REPLY_MAX];
+
/* force reset */
struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
@@ -1238,7 +877,7 @@ struct iwl_priv {
u8 channel_count; /* # of channels */
/* thermal calibration */
- s32 temperature; /* degrees Kelvin */
+ s32 temperature; /* Celsius */
s32 last_temperature;
/* init calibration results */
@@ -1255,21 +894,6 @@ struct iwl_priv {
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
- /* spinlock */
- spinlock_t lock; /* protect general shared data */
- spinlock_t hcmd_lock; /* protect hcmd */
- spinlock_t reg_lock; /* protect hw register access */
- struct mutex mutex;
-
- struct iwl_bus *bus; /* bus specific data */
- struct iwl_trans trans;
-
- /* microcode/device supports multiple contexts */
- u8 valid_contexts;
-
- /* command queue number */
- u8 cmd_queue;
-
/* max number of station keys */
u8 sta_key_max_num;
@@ -1283,9 +907,6 @@ struct iwl_priv {
u32 ucode_ver; /* version of ucode, copy of
iwl_ucode.ver */
- /* uCode owner: default: IWL_OWNERSHIP_DRIVER */
- u8 ucode_owner;
-
struct fw_img ucode_rt;
struct fw_img ucode_init;
struct fw_img ucode_wowlan;
@@ -1317,52 +938,25 @@ struct iwl_priv {
/* Rate scaling data */
u8 retry_rate;
- wait_queue_head_t wait_command_queue;
-
int activity_timer_active;
- /* Rx and Tx DMA processing queues */
- struct iwl_rx_queue rxq;
- struct iwl_tx_queue *txq;
- unsigned long txq_ctx_active_msk;
- struct iwl_dma_ptr kw; /* keep warm address */
- struct iwl_dma_ptr scd_bc_tbls;
-
- u32 scd_base_addr; /* scheduler sram base address */
-
- unsigned long status;
-
/* counts mgmt, ctl, and data packets */
struct traffic_stats tx_stats;
struct traffic_stats rx_stats;
- /* counts interrupts */
- struct isr_statistics isr_stats;
-
struct iwl_power_mgr power_data;
struct iwl_tt_mgmt thermal_throttle;
/* station table variables */
-
- /* Note: if lock and sta_lock are needed, lock must be acquired first */
- spinlock_t sta_lock;
int num_stations;
struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
unsigned long ucode_key_table;
- /* queue refcounts */
-#define IWL_MAX_HW_QUEUES 32
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
- /* for each AC */
- atomic_t queue_stop_count[4];
+ u8 mac80211_registered;
/* Indication if ieee80211_ops->open has been called */
u8 is_open;
- u8 mac80211_registered;
-
- bool wowlan;
-
/* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
@@ -1398,14 +992,6 @@ struct iwl_priv {
} accum_stats, delta_stats, max_delta_stats;
#endif
- /* INT ICT Table */
- __le32 *ict_tbl;
- void *ict_tbl_vir;
- dma_addr_t ict_tbl_dma;
- dma_addr_t aligned_ict_tbl_dma;
- int ict_index;
- u32 inta;
- bool use_ict;
/*
* reporting the number of tids has AGG on. 0 means
* no AGGREGATION
@@ -1438,14 +1024,10 @@ struct iwl_priv {
/* remain-on-channel offload support */
struct ieee80211_channel *hw_roc_channel;
- struct delayed_work hw_roc_work;
+ struct delayed_work hw_roc_disable_work;
enum nl80211_channel_type hw_roc_chantype;
int hw_roc_duration;
- bool hw_roc_setup;
-
- struct sk_buff *offchan_tx_skb;
- int offchan_tx_timeout;
- struct ieee80211_channel *offchan_tx_chan;
+ bool hw_roc_setup, hw_roc_start_notified;
/* bt coex */
u8 bt_enable_flag;
@@ -1466,15 +1048,8 @@ struct iwl_priv {
struct iwl_rxon_context *cur_rssi_ctx;
bool bt_is_sco;
- struct iwl_hw_params hw_params;
-
- u32 inta_mask;
-
- struct workqueue_struct *workqueue;
-
struct work_struct restart;
struct work_struct scan_completed;
- struct work_struct rx_replenish;
struct work_struct abort_scan;
struct work_struct beacon_update;
@@ -1490,8 +1065,6 @@ struct iwl_priv {
struct work_struct bt_full_concurrency;
struct work_struct bt_runtime_config;
- struct tasklet_struct irq_tasklet;
-
struct delayed_work scan_check;
/* TX Power */
@@ -1500,12 +1073,6 @@ struct iwl_priv {
s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
s8 tx_power_next;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- /* debugging info */
- u32 debug_level; /* per device debugging will override global
- iwl_debug_level if set */
-#endif /* CONFIG_IWLWIFI_DEBUG */
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* debugfs */
u16 tx_traffic_idx;
@@ -1543,47 +1110,7 @@ struct iwl_priv {
bool have_rekey_data;
}; /*iwl_priv */
-static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
-{
- set_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
-{
- clear_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-/*
- * iwl_get_debug_level: Return active debug level for device
- *
- * Using sysfs it is possible to set per device debug level. This debug
- * level will be used if set, otherwise the global debug level which can be
- * set via module parameter is used.
- */
-static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
-{
- if (priv->debug_level)
- return priv->debug_level;
- else
- return iwl_debug_level;
-}
-#else
-static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
-{
- return iwl_debug_level;
-}
-#endif
-
-
-static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
- int txq_id, int idx)
-{
- if (priv->txq[txq_id].txb[idx].skb)
- return (struct ieee80211_hdr *)priv->txq[txq_id].
- txb[idx].skb->data;
- return NULL;
-}
+extern struct iwl_mod_params iwlagn_mod_params;
static inline struct iwl_rxon_context *
iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
@@ -1596,7 +1123,7 @@ iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
#define for_each_context(priv, ctx) \
for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
- if (priv->valid_contexts & BIT(ctx->ctxid))
+ if (priv->shrd->valid_contexts & BIT(ctx->ctxid))
static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
{
@@ -1650,13 +1177,4 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
}
-static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
-{
- __free_pages(page, priv->hw_params.rx_page_order);
-}
-
-static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
-{
- free_pages(page, priv->hw_params.rx_page_order);
-}
#endif /* __iwl_dev_h__ */
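
The queue code leaving this header keeps the n_bd/n_window split described in the removed comment: the hardware ring always has 256 descriptors, while the command queue keeps only a 32-entry software window, so a hardware index is folded into the window with index & (n_window - 1), as the relocated get_cmd_index() helper does. A tiny stand-alone illustration of that mapping, hard-coded to the 256/32 case from the comment:

#include <stdio.h>

#define N_BD     256   /* hardware descriptors, always 256 */
#define N_WINDOW  32   /* software window used for the command queue */

/* same mask trick as the get_cmd_index() helper moved out of this header */
static unsigned int cmd_index(unsigned int hw_index)
{
	return hw_index & (N_WINDOW - 1);
}

int main(void)
{
	unsigned int hw;

	/* every hardware slot maps back onto a software entry in 0..31 */
	for (hw = 0; hw < N_BD; hw += 100)
		printf("hw entry %3u -> sw entry %2u\n", hw, cmd_index(hw));
	return 0;
}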
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 2c84ba95afc..8a51c5ccda1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -29,6 +29,8 @@
#include <linux/tracepoint.h>
+struct iwl_priv;
+
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 19d31a5e32e..a4e43bd4a54 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -72,6 +72,7 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
+#include "iwl-agn.h"
#include "iwl-eeprom.h"
#include "iwl-io.h"
@@ -138,7 +139,7 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
/******************************************************************************
*
- * EEPROM related functions
+ * generic NVM functions
*
******************************************************************************/
@@ -155,11 +156,11 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
/* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
@@ -176,14 +177,14 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
{
- iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
{
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
int ret = 0;
IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
@@ -214,19 +215,106 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
return ret;
}
+u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+{
+ if (!priv->eeprom)
+ return 0;
+ return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+}
+
+int iwl_eeprom_check_version(struct iwl_priv *priv)
+{
+ u16 eeprom_ver;
+ u16 calib_ver;
+
+ eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
+ calib_ver = iwlagn_eeprom_calib_version(priv);
+
+ if (eeprom_ver < priv->cfg->eeprom_ver ||
+ calib_ver < priv->cfg->eeprom_calib_ver)
+ goto err;
+
+ IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+ eeprom_ver, calib_ver);
+
+ return 0;
+err:
+ IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+ "CALIB=0x%x < 0x%x\n",
+ eeprom_ver, priv->cfg->eeprom_ver,
+ calib_ver, priv->cfg->eeprom_calib_ver);
+ return -EINVAL;
+}
+
+int iwl_eeprom_check_sku(struct iwl_priv *priv)
+{
+ u16 radio_cfg;
+
+ if (!priv->cfg->sku) {
+ /* not using sku overwrite */
+ priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
+ if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ !priv->cfg->ht_params) {
+ IWL_ERR(priv, "Invalid 11n configuration\n");
+ return -EINVAL;
+ }
+ }
+ if (!priv->cfg->sku) {
+ IWL_ERR(priv, "Invalid device sku\n");
+ return -EINVAL;
+ }
+
+ IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
+
+ if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
+ /* not using .cfg overwrite */
+ radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
+ IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
+ priv->cfg->valid_tx_ant,
+ priv->cfg->valid_rx_ant);
+ return -EINVAL;
+ }
+ IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
+ priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
+ }
+ /*
+ * In some special cases the EEPROM does not reflect the correct
+ * antenna setting, so the valid tx/rx antennas can be overridden
+ * from .cfg instead.
+ */
+ return 0;
+}
+
+void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+{
+ const u8 *addr = iwl_eeprom_query_addr(priv,
+ EEPROM_MAC_ADDRESS);
+ memcpy(mac, addr, ETH_ALEN);
+}
+
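
iwl_eeprom_query16() above assembles a little-endian 16-bit word from the raw NVM byte image, and iwl_eeprom_get_mac() copies ETH_ALEN bytes starting at the MAC offset. The same two accesses against a throw-away byte array; the offsets and contents below are invented for the demo, not the real EEPROM layout.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DEMO_ETH_ALEN    6
#define DEMO_MAC_OFFSET  2      /* made-up layout for the demo image */

static uint16_t demo_query16(const uint8_t *image, size_t offset)
{
	/* little-endian: low byte first, exactly like iwl_eeprom_query16() */
	return (uint16_t)image[offset] | ((uint16_t)image[offset + 1] << 8);
}

int main(void)
{
	const uint8_t image[] = { 0x34, 0x12,            /* version word 0x1234 */
				  0x00, 0x1b, 0x77, 0xaa, 0xbb, 0xcc }; /* MAC */
	uint8_t mac[DEMO_ETH_ALEN];

	printf("version word: 0x%04x\n", demo_query16(image, 0));

	memcpy(mac, image + DEMO_MAC_OFFSET, DEMO_ETH_ALEN);
	printf("mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}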
+/******************************************************************************
+ *
+ * OTP related functions
+ *
+******************************************************************************/
+
static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
{
- iwl_read32(priv, CSR_OTP_GP_REG);
+ iwl_read32(bus(priv), CSR_OTP_GP_REG);
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
- iwl_clear_bit(priv, CSR_OTP_GP_REG,
+ iwl_clear_bit(bus(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
else
- iwl_set_bit(priv, CSR_OTP_GP_REG,
+ iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
-static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
+static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
{
u32 otpgp;
int nvm_type;
@@ -243,7 +331,7 @@ static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
nvm_type = NVM_DEVICE_TYPE_EEPROM;
break;
default:
- otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
+ otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
nvm_type = NVM_DEVICE_TYPE_OTP;
else
@@ -258,22 +346,22 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
int ret;
/* Enable 40MHz radio clock */
- iwl_write32(priv, CSR_GP_CNTRL,
- iwl_read32(priv, CSR_GP_CNTRL) |
+ iwl_write32(bus(priv), CSR_GP_CNTRL,
+ iwl_read32(bus(priv), CSR_GP_CNTRL) |
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* wait for clock to be ready */
- ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+ ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (ret < 0)
IWL_ERR(priv, "Time out access OTP\n");
else {
- iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
+ iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
udelay(5);
- iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
+ iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
/*
@@ -281,7 +369,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
* this is only applicable for HW with OTP shadow RAM
*/
if (priv->cfg->base_params->shadow_ram_support)
- iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG,
+ iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
}
return ret;
@@ -293,9 +381,9 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
u32 r;
u32 otpgp;
- iwl_write32(priv, CSR_EEPROM_REG,
+ iwl_write32(bus(priv), CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+ ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
@@ -303,13 +391,13 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
return ret;
}
- r = iwl_read32(priv, CSR_EEPROM_REG);
+ r = iwl_read32(bus(priv), CSR_EEPROM_REG);
/* check for ECC errors: */
- otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
+ otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
/* stop in this case */
/* set the uncorrectable OTP ECC bit for acknowledgement */
- iwl_set_bit(priv, CSR_OTP_GP_REG,
+ iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
return -EINVAL;
@@ -317,7 +405,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
/* continue in this case */
/* set the correctable OTP ECC bit for acknowledgement */
- iwl_set_bit(priv, CSR_OTP_GP_REG,
+ iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
}
@@ -407,11 +495,152 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
return -EINVAL;
}
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+/******************************************************************************
+ *
+ * Tx Power related functions
+ *
+******************************************************************************/
+/**
+ * iwl_get_max_txpower_avg - get the highest tx power from all chains.
+ * find the highest tx power from all chains for the channel
+ */
+static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
+ struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
+ int element, s8 *max_txpower_in_half_dbm)
{
- if (!priv->eeprom)
- return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+	s8 max_txpower_avg = 0; /* (in 1/2 dBm) */
+
+ /* Take the highest tx power from any valid chains */
+ if ((priv->cfg->valid_tx_ant & ANT_A) &&
+ (enhanced_txpower[element].chain_a_max > max_txpower_avg))
+ max_txpower_avg = enhanced_txpower[element].chain_a_max;
+ if ((priv->cfg->valid_tx_ant & ANT_B) &&
+ (enhanced_txpower[element].chain_b_max > max_txpower_avg))
+ max_txpower_avg = enhanced_txpower[element].chain_b_max;
+ if ((priv->cfg->valid_tx_ant & ANT_C) &&
+ (enhanced_txpower[element].chain_c_max > max_txpower_avg))
+ max_txpower_avg = enhanced_txpower[element].chain_c_max;
+ if (((priv->cfg->valid_tx_ant == ANT_AB) |
+ (priv->cfg->valid_tx_ant == ANT_BC) |
+ (priv->cfg->valid_tx_ant == ANT_AC)) &&
+ (enhanced_txpower[element].mimo2_max > max_txpower_avg))
+ max_txpower_avg = enhanced_txpower[element].mimo2_max;
+ if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
+ (enhanced_txpower[element].mimo3_max > max_txpower_avg))
+ max_txpower_avg = enhanced_txpower[element].mimo3_max;
+
+ /*
+ * max. tx power in EEPROM is in 1/2 dBm format
+ * convert from 1/2 dBm to dBm (round-up convert)
+	 * but we also do not want to lose the 1/2 dBm resolution,
+	 * which would impact performance
+ */
+ *max_txpower_in_half_dbm = max_txpower_avg;
+ return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
+}
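
A minimal stand-alone C sketch of the 1/2 dBm round-up used in the return expression of iwl_get_max_txpower_avg() above (the input values here are made up, not driver data):

#include <stdio.h>

/* Round a value given in 1/2 dBm units up to whole dBm, mirroring
 * (max_txpower_avg & 0x01) + (max_txpower_avg >> 1) above. */
static int half_dbm_to_dbm(int half_dbm)
{
	return (half_dbm & 0x01) + (half_dbm >> 1);
}

int main(void)
{
	printf("31 half-dBm -> %d dBm\n", half_dbm_to_dbm(31)); /* 15.5 -> 16 */
	printf("30 half-dBm -> %d dBm\n", half_dbm_to_dbm(30)); /* 15.0 -> 15 */
	return 0;
}

The half-dBm value is also reported back through max_txpower_in_half_dbm, so the extra 0.5 dB of resolution dropped by the rounding is still available to callers.
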
+
+static void
+iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
+ struct iwl_eeprom_enhanced_txpwr *txp,
+ s8 max_txpower_avg)
+{
+ int ch_idx;
+ bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
+ enum ieee80211_band band;
+
+ band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+
+ for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
+ struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
+
+		/* update matching channel only, or all channels for common data */
+ if (txp->channel != 0 && ch_info->channel != txp->channel)
+ continue;
+
+ /* update matching band only */
+ if (band != ch_info->band)
+ continue;
+
+ if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
+ ch_info->max_power_avg = max_txpower_avg;
+ ch_info->curr_txpow = max_txpower_avg;
+ ch_info->scan_power = max_txpower_avg;
+ }
+
+ if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
+ ch_info->ht40_max_power_avg = max_txpower_avg;
+ }
+}
+
+#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
+#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
+#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
+
+#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
+ ? # x " " : "")
+
+void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
+{
+ struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+ int idx, entries;
+ __le16 *txp_len;
+ s8 max_txp_avg, max_txp_avg_halfdbm;
+
+ BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
+
+ /* the length is in 16-bit words, but we want entries */
+ txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+ entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
+
+ txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+
+ for (idx = 0; idx < entries; idx++) {
+ txp = &txp_array[idx];
+ /* skip invalid entries */
+ if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
+ continue;
+
+ IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
+ (txp->channel && (txp->flags &
+ IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
+ "Common " : (txp->channel) ?
+ "Channel" : "Common",
+ (txp->channel),
+ TXP_CHECK_AND_PRINT(VALID),
+ TXP_CHECK_AND_PRINT(BAND_52G),
+ TXP_CHECK_AND_PRINT(OFDM),
+ TXP_CHECK_AND_PRINT(40MHZ),
+ TXP_CHECK_AND_PRINT(HT_AP),
+ TXP_CHECK_AND_PRINT(RES1),
+ TXP_CHECK_AND_PRINT(RES2),
+ TXP_CHECK_AND_PRINT(COMMON_TYPE),
+ txp->flags);
+ IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
+ "chain_B: 0X%02x chain_C: 0X%02x\n",
+ txp->chain_a_max, txp->chain_b_max,
+ txp->chain_c_max);
+ IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
+ "MIMO3: 0x%02x High 20_on_40: 0x%02x "
+ "Low 20_on_40: 0x%02x\n",
+ txp->mimo2_max, txp->mimo3_max,
+ ((txp->delta_20_in_40 & 0xf0) >> 4),
+ (txp->delta_20_in_40 & 0x0f));
+
+ max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
+ &max_txp_avg_halfdbm);
+
+ /*
+		 * Update the user limit values to the highest
+ * power supported by any channel
+ */
+ if (max_txp_avg > priv->tx_power_user_lmt)
+ priv->tx_power_user_lmt = max_txp_avg;
+ if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
+ priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
+
+ iwl_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
+ }
}
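
The entry count in iwl_eeprom_enhanced_txpower() above is derived from a length stored as 16-bit words: doubling gives bytes, and dividing by the 8-byte entry size (enforced by the BUILD_BUG_ON) gives entries. A small stand-alone sketch of that calculation with a hypothetical length value:

#include <stdio.h>
#include <stdint.h>

#define TXP_ENTRY_LEN 8	/* sizeof(struct iwl_eeprom_enhanced_txpwr) */

int main(void)
{
	uint16_t len_in_words = 24;	/* hypothetical EEPROM value */
	unsigned int bytes = len_in_words * 2;
	unsigned int entries = bytes / TXP_ENTRY_LEN;

	/* 24 words -> 48 bytes -> 6 enhanced-txpower entries */
	printf("%u words -> %u bytes -> %u entries\n",
	       len_in_words, bytes, entries);
	return 0;
}
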
/**
@@ -424,14 +653,14 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
{
__le16 *e;
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
+ u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
int sz;
int ret;
u16 addr;
u16 validblockaddr = 0;
u16 cache_addr = 0;
- priv->nvm_device_type = iwlcore_get_nvm_type(priv, hw_rev);
+ priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev);
if (priv->nvm_device_type == -ENOENT)
return -ENOENT;
/* allocate eeprom */
@@ -469,11 +698,11 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
ret = -ENOENT;
goto done;
}
- iwl_write32(priv, CSR_EEPROM_GP,
- iwl_read32(priv, CSR_EEPROM_GP) &
+ iwl_write32(bus(priv), CSR_EEPROM_GP,
+ iwl_read32(bus(priv), CSR_EEPROM_GP) &
~CSR_EEPROM_GP_IF_OWNER_MSK);
- iwl_set_bit(priv, CSR_OTP_GP_REG,
+ iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
/* traversing the linked list if no shadow ram supported */
@@ -498,10 +727,10 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
for (addr = 0; addr < sz; addr += sizeof(u16)) {
u32 r;
- iwl_write32(priv, CSR_EEPROM_REG,
+ iwl_write32(bus(priv), CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+ ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
@@ -509,7 +738,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
goto done;
}
- r = iwl_read32(priv, CSR_EEPROM_REG);
+ r = iwl_read32(bus(priv), CSR_EEPROM_REG);
e[addr / 2] = cpu_to_le16(r >> 16);
}
}
@@ -670,8 +899,9 @@ int iwl_init_channel_map(struct iwl_priv *priv)
IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
priv->channel_count);
- priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
- priv->channel_count, GFP_KERNEL);
+ priv->channel_info = kcalloc(priv->channel_count,
+ sizeof(struct iwl_channel_info),
+ GFP_KERNEL);
if (!priv->channel_info) {
IWL_ERR(priv, "Could not allocate channel_info\n");
priv->channel_count = 0;
@@ -838,7 +1068,7 @@ void iwl_rf_config(struct iwl_priv *priv)
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
EEPROM_RF_CFG_DASH_MSK(radio_cfg));
@@ -850,7 +1080,7 @@ void iwl_rf_config(struct iwl_priv *priv)
WARN_ON(1);
/* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index e4bf8ac5e64..c94747e7299 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -163,11 +163,19 @@ struct iwl_eeprom_enhanced_txpwr {
} __packed;
/* calibration */
+struct iwl_eeprom_calib_hdr {
+ u8 version;
+ u8 pa_type;
+ __le16 voltage;
+} __packed;
+
#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
/* temperature */
-#define EEPROM_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
+#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
+#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
+
/* agn links */
#define EEPROM_LINK_HOST (2*0x64)
@@ -301,7 +309,6 @@ void iwl_eeprom_free(struct iwl_priv *priv);
int iwl_eeprom_check_version(struct iwl_priv *priv);
int iwl_eeprom_check_sku(struct iwl_priv *priv);
const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
-int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
int iwl_init_channel_map(struct iwl_priv *priv);
void iwl_free_channel_map(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 0ad60b3c04d..5bede9d7f95 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -63,6 +63,8 @@
#ifndef __iwl_fh_h__
#define __iwl_fh_h__
+#include <linux/types.h>
+
/****************************/
/* Flow Handler Definitions */
/****************************/
@@ -266,8 +268,6 @@
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
-#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
-
/**
* Rx Shared Status Registers (RSSR)
*
@@ -422,10 +422,6 @@
#define RX_FREE_BUFFERS 64
#define RX_LOW_WATERMARK 8
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
/**
 * struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
@@ -508,4 +504,16 @@ struct iwl_tfd {
/* Keep Warm Size */
#define IWL_KW_SIZE 0x1000 /* 4k */
+/* Fixed (non-configurable) rx data from phy */
+
+/**
+ * struct iwlagn_scd_bc_tbl - scheduler byte count table
+ * base physical address provided by SCD_DRAM_BASE_ADDR
+ * @tfd_offset 0-12 - tx command byte count
+ * 12-16 - station index
+ */
+struct iwlagn_scd_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+} __packed;
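
Per the kerneldoc above, each tfd_offset entry packs the tx command byte count into the low 12 bits and the station index into the next 4 bits. A stand-alone sketch of that packing (field widths taken from the comment; the example values are made up, and the little-endian __le16 storage is ignored here):

#include <stdint.h>
#include <stdio.h>

static uint16_t scd_bc_entry(uint16_t byte_cnt, uint8_t sta_id)
{
	/* bits 0-11: byte count, bits 12-15: station index */
	return (byte_cnt & 0x0fff) | ((uint16_t)(sta_id & 0x0f) << 12);
}

int main(void)
{
	uint16_t ent = scd_bc_entry(260, 3);	/* hypothetical frame, station 3 */

	printf("entry=0x%04x byte_cnt=%u sta=%u\n",
	       ent, ent & 0x0fff, ent >> 12);
	return 0;
}
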
+
#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
deleted file mode 100644
index 9d91552d13c..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_helpers_h__
-#define __iwl_helpers_h__
-
-#include <linux/ctype.h>
-#include <net/mac80211.h>
-
-#include "iwl-io.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
-
-static inline struct ieee80211_conf *ieee80211_get_hw_conf(
- struct ieee80211_hw *hw)
-{
- return &hw->conf;
-}
-
-/**
- * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_queue_inc_wrap(int index, int n_bd)
-{
- return ++index & (n_bd - 1);
-}
-
-/**
- * iwl_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_queue_dec_wrap(int index, int n_bd)
-{
- return --index & (n_bd - 1);
-}
-
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
- BUG_ON(ac > 3); /* only have 2 bits */
- BUG_ON(hwq > 31); /* only use 5 bits */
-
- txq->swq_id = (hwq << 2) | ac;
-}
-
-static inline void iwl_wake_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (test_and_clear_bit(hwq, priv->queue_stopped))
- if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
- ieee80211_wake_queue(priv->hw, ac);
-}
-
-static inline void iwl_stop_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (!test_and_set_bit(hwq, priv->queue_stopped))
- if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
- ieee80211_stop_queue(priv->hw, ac);
-}
-
-static inline void iwl_wake_any_queue(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- u8 ac;
-
- for (ac = 0; ac < AC_NUM; ac++) {
- IWL_DEBUG_INFO(priv, "Queue Status: Q[%d] %s\n",
- ac, (atomic_read(&priv->queue_stop_count[ac]) > 0)
- ? "stopped" : "awake");
- iwl_wake_queue(priv, &priv->txq[ctx->ac_to_queue[ac]]);
- }
-}
-
-#ifdef ieee80211_stop_queue
-#undef ieee80211_stop_queue
-#endif
-
-#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
-
-#ifdef ieee80211_wake_queue
-#undef ieee80211_wake_queue
-#endif
-
-#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
-
-static inline void iwl_disable_interrupts(struct iwl_priv *priv)
-{
- clear_bit(STATUS_INT_ENABLED, &priv->status);
-
- /* disable interrupts from uCode/NIC to host */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* acknowledge/clear/reset any interrupts still pending
- * from uCode or flow handler (Rx/Tx DMA) */
- iwl_write32(priv, CSR_INT, 0xffffffff);
- iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
- IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
-}
-
-static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
- iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
-}
-
-static inline void iwl_enable_interrupts(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &priv->status);
- iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
-}
-
-/**
- * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits need to shift for masking)
- */
-static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return (1 << tsf_bits) - 1;
-}
-
-/**
- * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits need to shift for masking)
- */
-static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
-}
-
-#endif /* __iwl_helpers_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index aa4a9067445..3ffa8e62b85 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -25,46 +25,50 @@
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
+#include <linux/delay.h>
+#include <linux/device.h>
#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "iwl-debug.h"
#define IWL_POLL_INTERVAL 10 /* microseconds */
-static inline void __iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+static inline void __iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
{
- iwl_write32(priv, reg, iwl_read32(priv, reg) | mask);
+ iwl_write32(bus, reg, iwl_read32(bus, reg) | mask);
}
-static inline void __iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+static inline void __iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
{
- iwl_write32(priv, reg, iwl_read32(priv, reg) & ~mask);
+ iwl_write32(bus, reg, iwl_read32(bus, reg) & ~mask);
}
-void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- __iwl_set_bit(priv, reg, mask);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ __iwl_set_bit(bus, reg, mask);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- __iwl_clear_bit(priv, reg, mask);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ __iwl_clear_bit(bus, reg, mask);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
+int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
u32 bits, u32 mask, int timeout)
{
int t = 0;
do {
- if ((iwl_read32(priv, addr) & mask) == (bits & mask))
+ if ((iwl_read32(bus, addr) & mask) == (bits & mask))
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
@@ -73,14 +77,14 @@ int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
return -ETIMEDOUT;
}
-int iwl_grab_nic_access_silent(struct iwl_priv *priv)
+int iwl_grab_nic_access_silent(struct iwl_bus *bus)
{
int ret;
- lockdep_assert_held(&priv->reg_lock);
+ lockdep_assert_held(&bus->reg_lock);
/* this bit wakes up the NIC */
- __iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ __iwl_set_bit(bus, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* These bits say the device is running, and should keep running for
@@ -101,70 +105,70 @@ int iwl_grab_nic_access_silent(struct iwl_priv *priv)
* 5000 series and later (including 1000 series) have non-volatile SRAM,
* and do not save/restore SRAM when power cycling.
*/
- ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+ ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (ret < 0) {
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ iwl_write32(bus, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
return -EIO;
}
return 0;
}
-int iwl_grab_nic_access(struct iwl_priv *priv)
+int iwl_grab_nic_access(struct iwl_bus *bus)
{
- int ret = iwl_grab_nic_access_silent(priv);
+ int ret = iwl_grab_nic_access_silent(bus);
if (ret) {
- u32 val = iwl_read32(priv, CSR_GP_CNTRL);
- IWL_ERR(priv,
+ u32 val = iwl_read32(bus, CSR_GP_CNTRL);
+ IWL_ERR(bus,
"MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
}
return ret;
}
-void iwl_release_nic_access(struct iwl_priv *priv)
+void iwl_release_nic_access(struct iwl_bus *bus)
{
- lockdep_assert_held(&priv->reg_lock);
- __iwl_clear_bit(priv, CSR_GP_CNTRL,
+ lockdep_assert_held(&bus->reg_lock);
+ __iwl_clear_bit(bus, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
-u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg)
+u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
{
u32 value;
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
- value = iwl_read32(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
+	value = iwl_read32(bus, reg);
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
return value;
}
-void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
+void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv)) {
- iwl_write32(priv, reg, value);
- iwl_release_nic_access(priv);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ if (!iwl_grab_nic_access(bus)) {
+ iwl_write32(bus, reg, value);
+ iwl_release_nic_access(bus);
}
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
+int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
int timeout)
{
int t = 0;
do {
- if ((iwl_read_direct32(priv, addr) & mask) == mask)
+ if ((iwl_read_direct32(bus, addr) & mask) == mask)
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
@@ -173,122 +177,122 @@ int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
return -ETIMEDOUT;
}
-static inline u32 __iwl_read_prph(struct iwl_priv *priv, u32 reg)
+static inline u32 __iwl_read_prph(struct iwl_bus *bus, u32 reg)
{
- iwl_write32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ iwl_write32(bus, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
rmb();
- return iwl_read32(priv, HBUS_TARG_PRPH_RDAT);
+ return iwl_read32(bus, HBUS_TARG_PRPH_RDAT);
}
-static inline void __iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
+static inline void __iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
{
- iwl_write32(priv, HBUS_TARG_PRPH_WADDR,
+ iwl_write32(bus, HBUS_TARG_PRPH_WADDR,
((addr & 0x0000FFFF) | (3 << 24)));
wmb();
- iwl_write32(priv, HBUS_TARG_PRPH_WDAT, val);
+ iwl_write32(bus, HBUS_TARG_PRPH_WDAT, val);
}
-u32 iwl_read_prph(struct iwl_priv *priv, u32 reg)
+u32 iwl_read_prph(struct iwl_bus *bus, u32 reg)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
- val = __iwl_read_prph(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
+ val = __iwl_read_prph(bus, reg);
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
return val;
}
-void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
+void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv)) {
- __iwl_write_prph(priv, addr, val);
- iwl_release_nic_access(priv);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ if (!iwl_grab_nic_access(bus)) {
+ __iwl_write_prph(bus, addr, val);
+ iwl_release_nic_access(bus);
}
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
+void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
- __iwl_write_prph(priv, reg, __iwl_read_prph(priv, reg) | mask);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
+ __iwl_write_prph(bus, reg, __iwl_read_prph(bus, reg) | mask);
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
+void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
u32 bits, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
- __iwl_write_prph(priv, reg,
- (__iwl_read_prph(priv, reg) & mask) | bits);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
+ __iwl_write_prph(bus, reg,
+ (__iwl_read_prph(bus, reg) & mask) | bits);
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
+void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
- val = __iwl_read_prph(priv, reg);
- __iwl_write_prph(priv, reg, (val & ~mask));
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
+ val = __iwl_read_prph(bus, reg);
+ __iwl_write_prph(bus, reg, (val & ~mask));
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr,
+void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
void *buf, int words)
{
unsigned long flags;
int offs;
u32 *vals = buf;
- spin_lock_irqsave(&priv->reg_lock, flags);
- iwl_grab_nic_access(priv);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ iwl_grab_nic_access(bus);
- iwl_write32(priv, HBUS_TARG_MEM_RADDR, addr);
+ iwl_write32(bus, HBUS_TARG_MEM_RADDR, addr);
rmb();
for (offs = 0; offs < words; offs++)
- vals[offs] = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ vals[offs] = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ iwl_release_nic_access(bus);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
-u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr)
+u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr)
{
u32 value;
- _iwl_read_targ_mem_words(priv, addr, &value, 1);
+ _iwl_read_targ_mem_words(bus, addr, &value, 1);
return value;
}
-void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
+void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv)) {
- iwl_write32(priv, HBUS_TARG_MEM_WADDR, addr);
+ spin_lock_irqsave(&bus->reg_lock, flags);
+ if (!iwl_grab_nic_access(bus)) {
+ iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr);
wmb();
- iwl_write32(priv, HBUS_TARG_MEM_WDAT, val);
- iwl_release_nic_access(priv);
+ iwl_write32(bus, HBUS_TARG_MEM_WDAT, val);
+ iwl_release_nic_access(bus);
}
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&bus->reg_lock, flags);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 19a09310112..ced2cbeb6ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -29,65 +29,62 @@
#ifndef __iwl_io_h__
#define __iwl_io_h__
-#include <linux/io.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
#include "iwl-devtrace.h"
+#include "iwl-shared.h"
#include "iwl-bus.h"
-static inline void iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val)
+static inline void iwl_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
- trace_iwlwifi_dev_iowrite8(priv, ofs, val);
- bus_write8(priv->bus, ofs, val);
+ trace_iwlwifi_dev_iowrite8(priv(bus), ofs, val);
+ bus_write8(bus, ofs, val);
}
-static inline void iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
+static inline void iwl_write32(struct iwl_bus *bus, u32 ofs, u32 val)
{
- trace_iwlwifi_dev_iowrite32(priv, ofs, val);
- bus_write32(priv->bus, ofs, val);
+ trace_iwlwifi_dev_iowrite32(priv(bus), ofs, val);
+ bus_write32(bus, ofs, val);
}
-static inline u32 iwl_read32(struct iwl_priv *priv, u32 ofs)
+static inline u32 iwl_read32(struct iwl_bus *bus, u32 ofs)
{
- u32 val = bus_read32(priv->bus, ofs);
- trace_iwlwifi_dev_ioread32(priv, ofs, val);
+ u32 val = bus_read32(bus, ofs);
+ trace_iwlwifi_dev_ioread32(priv(bus), ofs, val);
return val;
}
-void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask);
-void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask);
+void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask);
+void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask);
-int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
+int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
u32 bits, u32 mask, int timeout);
-int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
+int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
int timeout);
-int iwl_grab_nic_access_silent(struct iwl_priv *priv);
-int iwl_grab_nic_access(struct iwl_priv *priv);
-void iwl_release_nic_access(struct iwl_priv *priv);
+int iwl_grab_nic_access_silent(struct iwl_bus *bus);
+int iwl_grab_nic_access(struct iwl_bus *bus);
+void iwl_release_nic_access(struct iwl_bus *bus);
-u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg);
-void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value);
+u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg);
+void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value);
-u32 iwl_read_prph(struct iwl_priv *priv, u32 reg);
-void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val);
-void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask);
-void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
+u32 iwl_read_prph(struct iwl_bus *bus, u32 reg);
+void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val);
+void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
+void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
u32 bits, u32 mask);
-void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask);
+void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
-void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr,
+void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
void *buf, int words);
-#define iwl_read_targ_mem_words(priv, addr, buf, bufsize) \
+#define iwl_read_targ_mem_words(bus, addr, buf, bufsize) \
do { \
BUILD_BUG_ON((bufsize) % sizeof(u32)); \
- _iwl_read_targ_mem_words(priv, addr, buf, \
+ _iwl_read_targ_mem_words(bus, addr, buf, \
(bufsize) / sizeof(u32));\
} while (0)
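
The BUILD_BUG_ON in the macro above rejects, at compile time, any buffer whose size is not a whole number of 32-bit words. A stand-alone illustration of the same compile-time check using C11 _Static_assert (the struct is hypothetical, not from the driver):

#include <stdint.h>
#include <stdio.h>

struct error_log_buf {
	uint32_t error_id;
	uint32_t program_counter;
	uint32_t line;
};

/* Fails to compile if the buffer cannot be read as whole 32-bit words,
 * which is what iwl_read_targ_mem_words() requires of its callers. */
_Static_assert(sizeof(struct error_log_buf) % sizeof(uint32_t) == 0,
	       "buffer size must be a multiple of 4 bytes");

int main(void)
{
	printf("%zu words\n", sizeof(struct error_log_buf) / sizeof(uint32_t));
	return 0;
}
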
-u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr);
-void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val);
+u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
+void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index a67ae56d546..eb541735296 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -31,7 +31,6 @@
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -41,6 +40,7 @@
#include "iwl-agn.h"
#include "iwl-io.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
/* Throughput OFF time(ms) ON time (ms)
* >300 25 25
@@ -71,7 +71,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
/* Set led register off */
void iwlagn_led_enable(struct iwl_priv *priv)
{
- iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+ iwl_write32(bus(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
/*
@@ -104,15 +104,14 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
.len = { sizeof(struct iwl_led_cmd), },
.data = { led_cmd, },
.flags = CMD_ASYNC,
- .callback = NULL,
};
u32 reg;
- reg = iwl_read32(priv, CSR_LED_REG);
+ reg = iwl_read32(bus(priv), CSR_LED_REG);
if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
- iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+ iwl_write32(bus(priv), CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
/* Set led pattern command */
@@ -126,7 +125,7 @@ static int iwl_led_cmd(struct iwl_priv *priv,
};
int ret;
- if (!test_bit(STATUS_READY, &priv->status))
+ if (!test_bit(STATUS_READY, &priv->shrd->status))
return -EBUSY;
if (priv->blink_on == on && priv->blink_off == off)
@@ -203,8 +202,7 @@ void iwl_leds_init(struct iwl_priv *priv)
break;
}
- ret = led_classdev_register(priv->bus->dev,
- &priv->led);
+ ret = led_classdev_register(bus(priv)->dev, &priv->led);
if (ret) {
kfree(priv->led.name);
return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 2fdbffa079c..3b6cc66365e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -64,9 +64,11 @@
#include <linux/pci-aspm.h>
#include "iwl-bus.h"
-#include "iwl-agn.h"
-#include "iwl-core.h"
#include "iwl-io.h"
+#include "iwl-shared.h"
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "iwl-cfg.h"
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
@@ -91,6 +93,7 @@ static u16 iwl_pciexp_link_ctrl(struct iwl_bus *bus)
{
int pos;
u16 pci_lnk_ctl;
+
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
pos = pci_pcie_cap(pci_dev);
@@ -120,23 +123,17 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
PCI_CFG_LINK_CTRL_VAL_L1_EN) {
/* L1-ASPM enabled; disable(!) L0S */
- iwl_set_bit(bus->drv_data, CSR_GIO_REG,
+ iwl_set_bit(bus, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n");
} else {
/* L1-ASPM disabled; enable(!) L0S */
- iwl_clear_bit(bus->drv_data, CSR_GIO_REG,
+ iwl_clear_bit(bus, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n");
}
}
-static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data)
-{
- bus->drv_data = drv_data;
- pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data);
-}
-
static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
int buf_len)
{
@@ -162,10 +159,9 @@ static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
return val;
}
-static struct iwl_bus_ops pci_ops = {
+static const struct iwl_bus_ops bus_ops_pci = {
.get_pm_support = iwl_pci_is_pm_supported,
.apm_config = iwl_pci_apm_config,
- .set_drv_data = iwl_pci_set_drv_data,
.get_hw_id = iwl_pci_get_hw_id,
.write8 = iwl_pci_write8,
.write32 = iwl_pci_write32,
@@ -256,6 +252,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
/* 6x30 Series */
{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
@@ -328,6 +327,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
{IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
/* 2x30 Series */
{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
@@ -355,6 +355,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
{IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
/* 135 Series */
{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
@@ -387,6 +388,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_bus = IWL_BUS_GET_PCI_BUS(bus);
pci_bus->pci_dev = pdev;
+ pci_set_drvdata(pdev, bus);
+
/* W/A - seems to solve weird behavior. We need to remove this if we
* don't want to stay in L1 all the time. This wastes a lot of power */
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
@@ -457,9 +460,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bus->dev = &pdev->dev;
bus->irq = pdev->irq;
- bus->ops = &pci_ops;
+ bus->ops = &bus_ops_pci;
- err = iwl_probe(bus, cfg);
+ err = iwl_probe(bus, &trans_ops_pcie, cfg);
if (err)
goto out_disable_msi;
return 0;
@@ -480,12 +483,12 @@ out_no_pci:
static void __devexit iwl_pci_remove(struct pci_dev *pdev)
{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- struct iwl_bus *bus = priv->bus;
+ struct iwl_bus *bus = pci_get_drvdata(pdev);
struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus);
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
+ struct iwl_shared *shrd = bus->shrd;
- iwl_remove(priv);
+ iwl_remove(shrd->priv);
pci_disable_msi(pci_dev);
pci_iounmap(pci_dev, pci_bus->hw_base);
@@ -496,25 +499,27 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
kfree(bus);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int iwl_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
+ struct iwl_bus *bus = pci_get_drvdata(pdev);
+ struct iwl_shared *shrd = bus->shrd;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/
- return iwl_suspend(priv);
+ return iwl_trans_suspend(shrd->trans);
}
static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
+ struct iwl_bus *bus = pci_get_drvdata(pdev);
+ struct iwl_shared *shrd = bus->shrd;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -527,7 +532,7 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- return iwl_resume(priv);
+ return iwl_trans_resume(shrd->trans);
}
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index cd64df05f9e..4eaab204322 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -43,6 +43,7 @@
#include "iwl-debug.h"
#include "iwl-power.h"
#include "iwl-trans.h"
+#include "iwl-shared.h"
/*
* Setting power level allows the card to go to sleep when not busy.
@@ -214,7 +215,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
else
cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
- if (priv->cfg->base_params->shadow_reg_enable)
+ if (hw_params(priv).shadow_reg_enable)
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
@@ -300,7 +301,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
if (priv->power_data.bus_pm)
cmd->flags |= IWL_POWER_PCI_PM_MSK;
- if (priv->cfg->base_params->shadow_reg_enable)
+ if (hw_params(priv).shadow_reg_enable)
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
@@ -335,7 +336,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
le32_to_cpu(cmd->sleep_interval[3]),
le32_to_cpu(cmd->sleep_interval[4]));
- return trans_send_cmd_pdu(&priv->trans, POWER_TABLE_CMD, CMD_SYNC,
+ return iwl_trans_send_cmd_pdu(trans(priv), POWER_TABLE_CMD, CMD_SYNC,
sizeof(struct iwl_powertable_cmd), cmd);
}
@@ -347,7 +348,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
dtimper = priv->hw->conf.ps_dtim_period ?: 1;
- if (priv->wowlan)
+ if (priv->shrd->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
else if (!priv->cfg->base_params->no_idle_support &&
priv->hw->conf.flags & IEEE80211_CONF_IDLE)
@@ -382,7 +383,7 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
int ret;
bool update_chains;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
/* Don't update the RX chain when chain noise calibration is running */
update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
@@ -391,23 +392,23 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
return 0;
- if (!iwl_is_ready_rf(priv))
+ if (!iwl_is_ready_rf(priv->shrd))
return -EIO;
/* scan complete use sleep_power_next, need to be updated */
memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
- if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
+ if (test_bit(STATUS_SCANNING, &priv->shrd->status) && !force) {
IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
return 0;
}
if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
- set_bit(STATUS_POWER_PMI, &priv->status);
+ set_bit(STATUS_POWER_PMI, &priv->shrd->status);
ret = iwl_set_power(priv, cmd);
if (!ret) {
if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
- clear_bit(STATUS_POWER_PMI, &priv->status);
+ clear_bit(STATUS_POWER_PMI, &priv->shrd->status);
if (update_chains)
iwl_update_chain_flags(priv);
@@ -435,7 +436,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
- priv->power_data.bus_pm = bus_get_pm_support(priv->bus);
+ priv->power_data.bus_pm = bus_get_pm_support(bus(priv));
priv->power_data.debug_sleep_level_override = -1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 2f267b8aabb..bebdd828f32 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -217,8 +217,8 @@
((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
#define SCD_QUEUECHAIN_SEL_ALL(priv) \
- (((1<<(priv)->hw_params.max_txq_num) - 1) &\
- (~(1<<(priv)->cmd_queue)))
+ (((1<<hw_params(priv).max_txq_num) - 1) &\
+ (~(1<<(priv)->shrd->cmd_queue)))
#define SCD_BASE (PRPH_BASE + 0xa02c00)
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 77e528f5db8..e5d727f537d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -33,9 +33,7 @@
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
-#include "iwl-sta.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
@@ -68,14 +66,14 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
/* Exit instantly with error when device is not ready
* to receive scan abort command or it does not perform
* hardware scan currently */
- if (!test_bit(STATUS_READY, &priv->status) ||
- !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
- !test_bit(STATUS_SCAN_HW, &priv->status) ||
- test_bit(STATUS_FW_ERROR, &priv->status) ||
- test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (!test_bit(STATUS_READY, &priv->shrd->status) ||
+ !test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) ||
+ !test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
+ test_bit(STATUS_FW_ERROR, &priv->shrd->status) ||
+ test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
return -EIO;
- ret = trans_send_cmd(&priv->trans, &cmd);
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
if (ret)
return ret;
@@ -91,7 +89,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
ret = -EIO;
}
- iwl_free_pages(priv, cmd.reply_page);
+ iwl_free_pages(priv->shrd, cmd.reply_page);
return ret;
}
@@ -103,24 +101,90 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
ieee80211_scan_completed(priv->hw, aborted);
}
+ if (priv->scan_type == IWL_SCAN_ROC) {
+ ieee80211_remain_on_channel_expired(priv->hw);
+ priv->hw_roc_channel = NULL;
+ schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
+ }
+
priv->scan_type = IWL_SCAN_NORMAL;
priv->scan_vif = NULL;
priv->scan_request = NULL;
}
+static void iwl_process_scan_complete(struct iwl_priv *priv)
+{
+ bool aborted;
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status))
+ return;
+
+ IWL_DEBUG_SCAN(priv, "Completed scan.\n");
+
+ cancel_delayed_work(&priv->scan_check);
+
+ aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
+ if (aborted)
+ IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
+
+ if (!test_and_clear_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
+ goto out_settings;
+ }
+
+ if (priv->scan_type == IWL_SCAN_ROC) {
+ ieee80211_remain_on_channel_expired(priv->hw);
+ priv->hw_roc_channel = NULL;
+ schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
+ }
+
+ if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
+ int err;
+
+ /* Check if mac80211 requested scan during our internal scan */
+ if (priv->scan_request == NULL)
+ goto out_complete;
+
+ /* If so request a new scan */
+ err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
+ priv->scan_request->channels[0]->band);
+ if (err) {
+ IWL_DEBUG_SCAN(priv,
+ "failed to initiate pending scan: %d\n", err);
+ aborted = true;
+ goto out_complete;
+ }
+
+ return;
+ }
+
+out_complete:
+ iwl_complete_scan(priv, aborted);
+
+out_settings:
+	/* Can we still talk to firmware? */
+ if (!iwl_is_ready_rf(priv->shrd))
+ return;
+
+ iwlagn_post_scan(priv);
+}
+
void iwl_force_scan_end(struct iwl_priv *priv)
{
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
+ if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
return;
}
IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
- clear_bit(STATUS_SCANNING, &priv->status);
- clear_bit(STATUS_SCAN_HW, &priv->status);
- clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+ clear_bit(STATUS_SCANNING, &priv->shrd->status);
+ clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
+ clear_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status);
iwl_complete_scan(priv, true);
}
@@ -128,14 +192,14 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
{
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
+ if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
return;
}
- if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
return;
}
@@ -154,7 +218,7 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
int iwl_scan_cancel(struct iwl_priv *priv)
{
IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
- queue_work(priv->workqueue, &priv->abort_scan);
+ queue_work(priv->shrd->workqueue, &priv->abort_scan);
return 0;
}
@@ -163,28 +227,42 @@ int iwl_scan_cancel(struct iwl_priv *priv)
* @ms: amount of time to wait (in milliseconds) for scan to abort
*
*/
-int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
+void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies(ms);
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
iwl_do_scan_abort(priv);
while (time_before_eq(jiffies, timeout)) {
- if (!test_bit(STATUS_SCAN_HW, &priv->status))
- break;
+ if (!test_bit(STATUS_SCAN_HW, &priv->shrd->status))
+ goto finished;
msleep(20);
}
- return test_bit(STATUS_SCAN_HW, &priv->status);
+ return;
+
+ finished:
+ /*
+ * Now STATUS_SCAN_HW is clear. This means that the
+ * device finished, but the background work is going
+ * to execute at best as soon as we release the mutex.
+ * Since we need to be able to issue a new scan right
+ * after this function returns, run the complete here.
+ * The STATUS_SCAN_COMPLETE bit will then be cleared
+ * and prevent the background work from "completing"
+ * a possible new scan.
+ */
+ iwl_process_scan_complete(priv);
}
/* Service response to REPLY_SCAN_CMD (0x80) */
-static void iwl_rx_reply_scan(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwl_rx_reply_scan(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -193,11 +271,13 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
+ return 0;
}
/* Service SCAN_START_NOTIFICATION (0x82) */
-static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scanstart_notification *notif =
@@ -211,11 +291,20 @@ static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
notif->status, notif->beacon_timer);
+
+ if (priv->scan_type == IWL_SCAN_ROC &&
+ !priv->hw_roc_start_notified) {
+ ieee80211_ready_on_channel(priv->hw);
+ priv->hw_roc_start_notified = true;
+ }
+
+ return 0;
}
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -224,20 +313,24 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan ch.res: "
"%d [802.11%s] "
+ "probe status: %u:%u "
"(TSF: 0x%08X:%08X) - %d "
"elapsed=%lu usec\n",
notif->channel,
notif->band ? "bg" : "a",
+ notif->probe_status, notif->num_probe_not_sent,
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
le32_to_cpu(notif->statistics[0]),
le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
+ return 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
@@ -247,14 +340,21 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
scan_notif->tsf_low,
scan_notif->tsf_high, scan_notif->status);
- /* The HW is no longer scanning */
- clear_bit(STATUS_SCAN_HW, &priv->status);
-
IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
(priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
jiffies_to_msecs(jiffies - priv->scan_start));
- queue_work(priv->workqueue, &priv->scan_completed);
+ /*
+ * When aborting, we run the scan completed background work inline
+ * and the background work must then do nothing. The SCAN_COMPLETE
+ * bit helps implement that logic and thus needs to be set before
+ * queueing the work. Also, since the scan abort waits for SCAN_HW
+ * to clear, we need to set SCAN_COMPLETE before clearing SCAN_HW
+ * to avoid a race there.
+ */
+ set_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status);
+ clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ queue_work(priv->shrd->workqueue, &priv->scan_completed);
if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
iwl_advanced_bt_coexist(priv) &&
@@ -274,8 +374,10 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
}
priv->bt_status = scan_notif->bt_status;
- queue_work(priv->workqueue, &priv->bt_traffic_change_work);
+ queue_work(priv->shrd->workqueue,
+ &priv->bt_traffic_change_work);
}
+ return 0;
}
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
@@ -289,9 +391,8 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
iwl_rx_scan_complete_notif;
}
-inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes)
+static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band, u8 n_probes)
{
if (band == IEEE80211_BAND_5GHZ)
return IWL_ACTIVE_DWELL_TIME_52 +
@@ -301,40 +402,486 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}
-u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
+static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
{
struct iwl_rxon_context *ctx;
+
+ /*
+	 * If we're associated, we clamp the dwell time to 98%
+ * of the smallest beacon interval (minus 2 * channel
+ * tune time)
+ */
+ for_each_context(priv, ctx) {
+ u16 value;
+
+ if (!iwl_is_associated_ctx(ctx))
+ continue;
+ value = ctx->beacon_int;
+ if (!value)
+ value = IWL_PASSIVE_DWELL_BASE;
+ value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+ dwell_time = min(value, dwell_time);
+ }
+
+ return dwell_time;
+}
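
A stand-alone sketch of the clamp performed by iwl_limit_dwell() above, using the constants the driver defines elsewhere at this point (treat the numeric values as illustrative):

#include <stdio.h>

#define IWL_PASSIVE_DWELL_BASE		100	/* values from iwl-core.h, shown for illustration */
#define IWL_PASSIVE_DWELL_TIME_24	20
#define IWL_CHANNEL_TUNE_TIME		5

static unsigned int limit_dwell(unsigned int beacon_int, unsigned int dwell)
{
	unsigned int value = beacon_int ? beacon_int : IWL_PASSIVE_DWELL_BASE;

	/* 98% of the beacon interval, minus twice the channel tune time */
	value = value * 98 / 100 - IWL_CHANNEL_TUNE_TIME * 2;
	return value < dwell ? value : dwell;
}

int main(void)
{
	unsigned int passive = IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24;

	/* associated with a 100 TU beacon interval: 100*98/100 - 10 = 88 */
	printf("clamped passive dwell: %u\n", limit_dwell(100, passive));
	return 0;
}
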
+
+static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band)
+{
u16 passive = (band == IEEE80211_BAND_2GHZ) ?
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
- if (iwl_is_any_associated(priv)) {
+ return iwl_limit_dwell(priv, passive);
+}
+
+static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ channel = iwl_get_single_channel_number(priv, band);
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
+static int iwl_get_channels_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct iwl_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+ chan = priv->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = chan->hw_value;
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = iwl_get_channel_info(priv, band, channel);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_SCAN(priv,
+ "Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
+ channel, le32_to_cpu(scan_ch->type),
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ "ACTIVE" : "PASSIVE",
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+ return added;
+}
+
+static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_SCAN_CMD,
+ .len = { sizeof(struct iwl_scan_cmd), },
+ .flags = CMD_SYNC,
+ };
+ struct iwl_scan_cmd *scan;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = hw_params(priv).valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+ u8 scan_tx_antennas = hw_params(priv).valid_tx_ant;
+ int ret;
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ if (vif)
+ ctx = iwl_rxon_ctx_from_vif(vif);
+
+ if (!priv->scan_cmd) {
+ priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan_cmd) {
+ IWL_DEBUG_SCAN(priv,
+					"failed to allocate memory for scan\n");
+ return -ENOMEM;
+ }
+ }
+ scan = priv->scan_cmd;
+ memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+ if (priv->scan_type != IWL_SCAN_ROC &&
+ iwl_is_any_associated(priv)) {
+ u16 interval = 0;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+ switch (priv->scan_type) {
+ case IWL_SCAN_ROC:
+ WARN_ON(1);
+ break;
+ case IWL_SCAN_RADIO_RESET:
+ interval = 0;
+ break;
+ case IWL_SCAN_NORMAL:
+ interval = vif->bss_conf.beacon_int;
+ break;
+ }
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
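+		/*
+		 * The encoding below packs the quotient (whole beacon
+		 * intervals to skip) into the upper bits (<< 22) and the
+		 * remainder, multiplied by 1024 (presumably TUs converted
+		 * to usec), into the lower bits. Worked example with
+		 * hypothetical numbers: suspend_time = 100 and interval =
+		 * 200 give extra = 0 and scan_suspend_time = 100 * 1024.
+		 */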
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time = (extra |
+ ((suspend_time % interval) * 1024));
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ } else if (priv->scan_type == IWL_SCAN_ROC) {
+ scan->suspend_time = 0;
+ scan->max_out_time = 0;
+ scan->quiet_time = 0;
+ scan->quiet_plcp_th = 0;
+ }
+
+ switch (priv->scan_type) {
+ case IWL_SCAN_RADIO_RESET:
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ break;
+ case IWL_SCAN_NORMAL:
+ if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+ break;
+ case IWL_SCAN_ROC:
+ IWL_DEBUG_SCAN(priv, "Start ROC scan.\n");
+ break;
+ }
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (priv->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod = le32_to_cpu(
+ priv->contexts[IWL_RXON_CTX_BSS].active.flags &
+ RXON_FLG_CHANNEL_MODE_MSK)
+ >> RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = IWL_RATE_6M_PLCP;
+ } else {
+ rate = IWL_RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
/*
- * If we're associated, we clamp the maximum passive
- * dwell time to be 98% of the smallest beacon interval
- * (minus 2 * channel tune time)
+ * Internal scans are passive, so we can indiscriminately set
+ * the BT ignore flag on 2.4 GHz since it applies to TX only.
*/
- for_each_context(priv, ctx) {
- u16 value;
-
- if (!iwl_is_associated_ctx(ctx))
- continue;
- value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
- if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
- value = IWL_PASSIVE_DWELL_BASE;
- value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
- passive = min(value, passive);
+ if (priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist)
+ scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = IWL_RATE_6M_PLCP;
+ break;
+ default:
+ IWL_WARN(priv, "Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
+ * here instead of IWL_GOOD_CRC_TH_DISABLED.
+ *
+ * This was fixed in later versions along with some other
+ * scan changes, and the threshold behaves as a flag in those
+ * versions.
+ */
+ if (priv->new_scan_threshold_behaviour)
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+ IWL_GOOD_CRC_TH_DISABLED;
+ else
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+ IWL_GOOD_CRC_TH_NEVER;
+
+ band = priv->scan_band;
+
+ if (priv->cfg->scan_rx_antennas[band])
+ rx_ant = priv->cfg->scan_rx_antennas[band];
+
+ if (band == IEEE80211_BAND_2GHZ &&
+ priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist) {
+ /* transmit 2.4 GHz probes only on first antenna */
+ scan_tx_antennas = first_antenna(scan_tx_antennas);
+ }
+
+ priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv,
+ priv->scan_tx_ant[band],
+ scan_tx_antennas);
+ rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
+ scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains = rx_ant &
+ ((u8)(priv->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+ priv->chain_noise_data.active_chains);
+
+ rx_ant = first_antenna(active_chains);
+ }
+ if (priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist &&
+ priv->bt_full_concurrent) {
+ /* operated as 1x1 in full concurrency mode */
+ rx_ant = first_antenna(rx_ant);
+ }
+
+ /* MIMO is not used here, but value is required */
+ rx_chain |=
+ hw_params(priv).valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+ switch (priv->scan_type) {
+ case IWL_SCAN_NORMAL:
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ vif->addr,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ break;
+ case IWL_SCAN_RADIO_RESET:
+ case IWL_SCAN_ROC:
+ /* use bcast addr, will not be transmitted but must be valid */
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ iwl_bcast_addr, NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ break;
+ default:
+ BUG();
+ }
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+ RXON_FILTER_BCON_AWARE_MSK);
+
+ switch (priv->scan_type) {
+ case IWL_SCAN_RADIO_RESET:
+ scan->channel_count =
+ iwl_get_single_channel_for_scan(priv, vif, band,
+ (void *)&scan->data[cmd_len]);
+ break;
+ case IWL_SCAN_NORMAL:
+ scan->channel_count =
+ iwl_get_channels_for_scan(priv, vif, band,
+ is_active, n_probes,
+ (void *)&scan->data[cmd_len]);
+ break;
+ case IWL_SCAN_ROC: {
+ struct iwl_scan_channel *scan_ch;
+ int n_chan, i;
+ u16 dwell;
+
+ dwell = iwl_limit_dwell(priv, priv->hw_roc_duration);
+ n_chan = DIV_ROUND_UP(priv->hw_roc_duration, dwell);
+
+ scan->channel_count = n_chan;
+
+ scan_ch = (void *)&scan->data[cmd_len];
+
+ for (i = 0; i < n_chan; i++) {
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->channel =
+ cpu_to_le16(priv->hw_roc_channel->hw_value);
+
+ if (i == n_chan - 1)
+ dwell = priv->hw_roc_duration - i * dwell;
+
+ scan_ch->active_dwell =
+ scan_ch->passive_dwell = cpu_to_le16(dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (priv->hw_roc_channel->band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ scan_ch++;
}
+ }
+
+ break;
+ }
+
+ if (scan->channel_count == 0) {
+ IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct iwl_scan_channel);
+ cmd.data[0] = scan;
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+ scan->len = cpu_to_le16(cmd.len[0]);
+
+ /* set scan bit here for PAN params */
+ set_bit(STATUS_SCAN_HW, &priv->shrd->status);
+
+ ret = iwlagn_set_pan_params(priv);
+ if (ret)
+ return ret;
+
+ ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ if (ret) {
+ clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ iwlagn_set_pan_params(priv);
}
- return passive;
+ return ret;
}
void iwl_init_scan_params(struct iwl_priv *priv)
{
- u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
+ u8 ant_idx = fls(hw_params(priv).valid_tx_ant) - 1;
if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
@@ -348,50 +895,50 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
{
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&priv->shrd->mutex);
cancel_delayed_work(&priv->scan_check);
- if (!iwl_is_ready_rf(priv)) {
+ if (!iwl_is_ready_rf(priv->shrd)) {
IWL_WARN(priv, "Request scan called when driver not ready.\n");
return -EIO;
}
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv,
"Multiple concurrent scan requests in parallel.\n");
return -EBUSY;
}
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
return -EBUSY;
}
IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
scan_type == IWL_SCAN_NORMAL ? "" :
- scan_type == IWL_SCAN_OFFCH_TX ? "offchan TX " :
+ scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
"internal short ");
- set_bit(STATUS_SCANNING, &priv->status);
+ set_bit(STATUS_SCANNING, &priv->shrd->status);
priv->scan_type = scan_type;
priv->scan_start = jiffies;
priv->scan_band = band;
ret = iwlagn_request_scan(priv, vif);
if (ret) {
- clear_bit(STATUS_SCANNING, &priv->status);
+ clear_bit(STATUS_SCANNING, &priv->shrd->status);
priv->scan_type = IWL_SCAN_NORMAL;
return ret;
}
- queue_delayed_work(priv->workqueue, &priv->scan_check,
+ queue_delayed_work(priv->shrd->workqueue, &priv->scan_check,
IWL_SCAN_CHECK_WATCHDOG);
return 0;
}
-int iwl_mac_hw_scan(struct ieee80211_hw *hw,
+int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
@@ -403,7 +950,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
if (req->n_channels == 0)
return -EINVAL;
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
/*
* If an internal scan is in progress, just set
@@ -432,7 +979,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return ret;
}
@@ -443,7 +990,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
*/
void iwl_internal_short_hw_scan(struct iwl_priv *priv)
{
- queue_work(priv->workqueue, &priv->start_internal_scan);
+ queue_work(priv->shrd->workqueue, &priv->start_internal_scan);
}
static void iwl_bg_start_internal_scan(struct work_struct *work)
@@ -453,14 +1000,14 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
IWL_DEBUG_SCAN(priv, "Start internal scan\n");
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
goto unlock;
}
- if (test_bit(STATUS_SCANNING, &priv->status)) {
+ if (test_bit(STATUS_SCANNING, &priv->shrd->status)) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
goto unlock;
}
@@ -468,7 +1015,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
unlock:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
static void iwl_bg_scan_check(struct work_struct *data)
@@ -481,9 +1028,9 @@ static void iwl_bg_scan_check(struct work_struct *data)
	/* Since we got here, the firmware did not finish the scan and
	 * most likely is in bad shape, so we don't bother to
	 * send an abort command, just force scan complete to mac80211 */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
/**
@@ -541,70 +1088,19 @@ static void iwl_bg_abort_scan(struct work_struct *work)
/* We keep scan_check work queued in case when firmware will not
* report back scan completed notification */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_scan_cancel_timeout(priv, 200);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
static void iwl_bg_scan_completed(struct work_struct *work)
{
struct iwl_priv *priv =
- container_of(work, struct iwl_priv, scan_completed);
- bool aborted;
-
- IWL_DEBUG_SCAN(priv, "Completed scan.\n");
-
- cancel_delayed_work(&priv->scan_check);
-
- mutex_lock(&priv->mutex);
-
- aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- if (aborted)
- IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
-
- if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
- goto out_settings;
- }
-
- if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->offchan_tx_skb) {
- ieee80211_tx_status_irqsafe(priv->hw,
- priv->offchan_tx_skb);
- priv->offchan_tx_skb = NULL;
- }
-
- if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
- int err;
-
- /* Check if mac80211 requested scan during our internal scan */
- if (priv->scan_request == NULL)
- goto out_complete;
-
- /* If so request a new scan */
- err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
- priv->scan_request->channels[0]->band);
- if (err) {
- IWL_DEBUG_SCAN(priv,
- "failed to initiate pending scan: %d\n", err);
- aborted = true;
- goto out_complete;
- }
-
- goto out;
- }
-
-out_complete:
- iwl_complete_scan(priv, aborted);
-
-out_settings:
- /* Can we still talk to firmware ? */
- if (!iwl_is_ready_rf(priv))
- goto out;
-
- iwlagn_post_scan(priv);
+ container_of(work, struct iwl_priv, scan_completed);
-out:
- mutex_unlock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
+ iwl_process_scan_complete(priv);
+ mutex_unlock(&priv->shrd->mutex);
}
void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
@@ -622,8 +1118,8 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->scan_completed);
if (cancel_delayed_work_sync(&priv->scan_check)) {
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
iwl_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
}
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
new file mode 100644
index 00000000000..1f7a93c67c4
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -0,0 +1,534 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_shared_h__
+#define __iwl_shared_h__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/gfp.h>
+#include <linux/mm.h> /* for page_address */
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+
+/**
+ * DOC: shared area - role and goal
+ *
+ * The shared area contains all the data exported by the upper layer to the
+ * other layers. Since the bus and transport layer shouldn't dereference
+ * iwl_priv, all the data needed by the upper layer and the transport / bus
+ * layer must be here.
+ * The shared area also holds pointer to all the other layers. This allows a
+ * layer to call a function from another layer.
+ *
+ * NOTE: All the layers hold a pointer to the shared area, which must be named shrd.
+ * A few macros assume that (_m)->shrd points to the shared area no matter
+ * what _m is.
+ *
+ * The bus layer gets notifications about enumeration, suspend and resume.
+ * For the moment, the bus layer is not a Linux kernel module in itself, and
+ * the module_init function of the driver must call the bus specific
+ * registration functions. These functions are listed at the end of this file.
+ * For the moment, there is only one implementation of this interface: PCI-e.
+ * This implementation is iwl-pci.c
+ */
+
+struct iwl_cfg;
+struct iwl_bus;
+struct iwl_priv;
+struct iwl_sensitivity_ranges;
+struct iwl_trans_ops;
+
+#define DRV_NAME "iwlwifi"
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+extern struct iwl_mod_params iwlagn_mod_params;
+
+/**
+ * struct iwl_mod_params
+ *
+ * Holds the module parameters
+ *
+ * @sw_crypto: use software encryption instead of hardware, default = 0
+ * @num_of_queues: number of tx queues, HW dependent
+ * @disable_11n: disable 11n capabilities, default = 0 (11n enabled)
+ * @amsdu_size_8K: enable 8K amsdu size, default = 1
+ * @antenna: both antennas (use diversity), default = 0
+ * @restart_fw: restart firmware, default = 1
+ * @plcp_check: enable plcp health check, default = true
+ * @ack_check: enable ack health check, default = false
+ * @wd_disable: disable stuck queue watchdog check, default = false
+ * @bt_coex_active: enable bt coex, default = true
+ * @led_mode: system default, default = 0
+ * @no_sleep_autoadjust: disable automatic sleep level adjustment, default = true
+ * @power_save: enable power save, default = false
+ * @power_level: power level, default = 1
+ * @debug_level: levels are IWL_DL_*
+ * @ant_coupling: antenna coupling in dB, default = 0
+ * @bt_ch_announce: BT channel inhibition, default = enable
+ * @wanted_ucode_alternative: ucode alternative to use, default = 1
+ * @auto_agg: enable agg. without check, default = true
+ */
+struct iwl_mod_params {
+ int sw_crypto;
+ int num_of_queues;
+ int disable_11n;
+ int amsdu_size_8K;
+ int antenna;
+ int restart_fw;
+ bool plcp_check;
+ bool ack_check;
+ bool wd_disable;
+ bool bt_coex_active;
+ int led_mode;
+ bool no_sleep_autoadjust;
+ bool power_save;
+ int power_level;
+ u32 debug_level;
+ int ant_coupling;
+ bool bt_ch_announce;
+ int wanted_ucode_alternative;
+ bool auto_agg;
+};
+
+/**
+ * struct iwl_hw_params
+ *
+ * Holds the hardware parameters
+ *
+ * @max_txq_num: Max # Tx queues supported
+ * @num_ampdu_queues: num of ampdu queues
+ * @tx_chains_num: Number of TX chains
+ * @rx_chains_num: Number of RX chains
+ * @valid_tx_ant: usable antennas for TX
+ * @valid_rx_ant: usable antennas for RX
+ * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
+ * @sku: sku read from EEPROM
+ * @rx_page_order: Rx buffer page order
+ * @max_inst_size: for ucode use
+ * @max_data_size: for ucode use
+ * @ct_kill_threshold: temperature threshold - in hw dependent unit
+ * @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
+ * relevant for 1000, 6000 and up
+ * @wd_timeout: TX queues watchdog timeout
+ * @calib_init_cfg: setup initial calibrations for the hw
+ * @calib_rt_cfg: setup runtime calibrations for the hw
+ * @shadow_reg_enable: HW shadow register support
+ * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
+ */
+struct iwl_hw_params {
+ u8 max_txq_num;
+ u8 num_ampdu_queues;
+ u8 tx_chains_num;
+ u8 rx_chains_num;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u8 ht40_channel;
+ bool shadow_reg_enable;
+ u16 sku;
+ u32 rx_page_order;
+ u32 max_inst_size;
+ u32 max_data_size;
+ u32 ct_kill_threshold;
+ u32 ct_kill_exit_threshold;
+ unsigned int wd_timeout;
+
+ u32 calib_init_cfg;
+ u32 calib_rt_cfg;
+ const struct iwl_sensitivity_ranges *sens;
+};
+
+/**
+ * enum iwl_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ *	HW queue to be empty of packets for this RA / TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ *	HW queue to be empty of packets for this RA / TID.
+ */
+enum iwl_agg_state {
+ IWL_AGG_OFF = 0,
+ IWL_AGG_ON,
+ IWL_EMPTYING_HW_QUEUE_ADDBA,
+ IWL_EMPTYING_HW_QUEUE_DELBA,
+};
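+
+/*
+ * A typical aggregation life cycle (sketch) moves through these states as
+ * IWL_AGG_OFF -> IWL_EMPTYING_HW_QUEUE_ADDBA -> IWL_AGG_ON ->
+ * IWL_EMPTYING_HW_QUEUE_DELBA -> IWL_AGG_OFF.
+ */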
+
+/**
+ * struct iwl_ht_agg - aggregation state machine
+ *
+ * This struct holds the states for the BA agreement establishment and tear
+ * down. It also holds the state during the BA session itself. This struct is
+ * duplicated for each RA / TID.
+ *
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ * Tx response (REPLY_TX), and the block ack notification
+ * (REPLY_COMPRESSED_BA).
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session - used by the transport layer.
+ * Needed by the upper layer for debugfs only.
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ */
+struct iwl_ht_agg {
+ u32 rate_n_flags;
+ enum iwl_agg_state state;
+ u16 txq_id;
+ bool wait_for_ba;
+};
+
+/**
+ * struct iwl_tid_data - one for each RA / TID
+ *
+ * This struct holds the states for each RA / TID.
+ *
+ * @seq_number: the next WiFi sequence number to use
+ * @tfds_in_queue: number of packets sent to the HW queues.
+ * Exported for debugfs only
+ * @agg: aggregation state machine
+ */
+struct iwl_tid_data {
+ u16 seq_number;
+ u16 tfds_in_queue;
+ struct iwl_ht_agg agg;
+};
+
+/**
+ * struct iwl_shared - shared fields for all the layers of the driver
+ *
+ * @dbg_level_dev: dbg level set per device. Takes precedence over
+ * iwlagn_mod_params.debug_level if set (!= 0)
+ * @ucode_owner: IWL_OWNERSHIP_*
+ * @cmd_queue: command queue number
+ * @status: STATUS_*
+ * @valid_contexts: microcode/device supports multiple contexts
+ * @bus: pointer to the bus layer data
+ * @priv: pointer to the upper layer data
+ * @hw_params: see struct iwl_hw_params
+ * @workqueue: the workqueue used by all the layers of the driver
+ * @lock: protect general shared data
+ * @sta_lock: protects the station table.
+ * If lock and sta_lock are needed, lock must be acquired first.
+ * @mutex:
+ */
+struct iwl_shared {
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 dbg_level_dev;
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+#define IWL_OWNERSHIP_DRIVER 0
+#define IWL_OWNERSHIP_TM 1
+ u8 ucode_owner;
+ u8 cmd_queue;
+ unsigned long status;
+ bool wowlan;
+ u8 valid_contexts;
+
+ struct iwl_bus *bus;
+ struct iwl_priv *priv;
+ struct iwl_trans *trans;
+ struct iwl_hw_params hw_params;
+
+ struct workqueue_struct *workqueue;
+ spinlock_t lock;
+ spinlock_t sta_lock;
+ struct mutex mutex;
+
+ struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
+
+ wait_queue_head_t wait_command_queue;
+};
+
+/* Whatever _m is (iwl_trans, iwl_priv, iwl_bus), these macros will work */
+#define priv(_m) ((_m)->shrd->priv)
+#define bus(_m) ((_m)->shrd->bus)
+#define trans(_m) ((_m)->shrd->trans)
+#define hw_params(_m) ((_m)->shrd->hw_params)
+
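+/*
+ * Usage sketch (illustrative, not part of the driver): since every layer
+ * object embeds a shrd pointer, the same macro works on any of them, e.g.
+ *
+ *	hw_params(priv).valid_tx_ant
+ *	hw_params(trans(priv)).valid_tx_ant
+ *
+ * both resolve to the same shared struct iwl_hw_params instance.
+ */
+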
+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ * iwl_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set per device debug level. This debug
+ * level will be used if set, otherwise the global debug level which can be
+ * set via module parameter is used.
+ */
+static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
+{
+ if (shrd->dbg_level_dev)
+ return shrd->dbg_level_dev;
+ else
+ return iwlagn_mod_params.debug_level;
+}
+#else
+static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
+{
+ return iwlagn_mod_params.debug_level;
+}
+#endif
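+
+/*
+ * Typical use (sketch): debug macros compare the returned bitmask against
+ * an IWL_DL_* flag, e.g.
+ *
+ *	if (iwl_get_debug_level(shrd) & IWL_DL_SCAN)
+ *		...emit the scan debug message...
+ */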
+
+static inline void iwl_free_pages(struct iwl_shared *shrd, unsigned long page)
+{
+ free_pages(page, shrd->hw_params.rx_page_order);
+}
+
+/**
+ * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_queue_inc_wrap(int index, int n_bd)
+{
+ return ++index & (n_bd - 1);
+}
+
+/**
+ * iwl_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_queue_dec_wrap(int index, int n_bd)
+{
+ return --index & (n_bd - 1);
+}
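+
+/*
+ * For example, with n_bd = 256: iwl_queue_inc_wrap(255, 256) returns
+ * 256 & 255 = 0 (wrap to the start) and iwl_queue_dec_wrap(0, 256)
+ * returns -1 & 255 = 255 (wrap to the end); this relies on n_bd being
+ * a power of 2.
+ */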
+
+struct iwl_rx_mem_buffer {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
+
+static inline int get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
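+
+/*
+ * For example, get_ac_from_tid(0) and get_ac_from_tid(3) both map to
+ * IEEE80211_AC_BE, TIDs 6 and 7 map to IEEE80211_AC_VO, and TIDs 8-15
+ * are rejected with -EINVAL.
+ */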
+
+enum iwl_rxon_context_id {
+ IWL_RXON_CTX_BSS,
+ IWL_RXON_CTX_PAN,
+
+ NUM_IWL_RXON_CTX
+};
+
+int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
+ struct iwl_cfg *cfg);
+void __devexit iwl_remove(struct iwl_priv *priv);
+struct iwl_device_cmd;
+int __must_check iwl_rx_dispatch(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
+int iwlagn_hw_valid_rtc_data_addr(u32 addr);
+void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctx,
+ u8 sta_id, u8 tid);
+void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctx,
+ u8 sta_id, u8 tid);
+void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state);
+void iwl_nic_config(struct iwl_priv *priv);
+void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb);
+void iwl_apm_stop(struct iwl_priv *priv);
+int iwl_apm_init(struct iwl_priv *priv);
+void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand);
+const char *get_cmd_string(u8 cmd);
+bool iwl_check_for_ct_kill(struct iwl_priv *priv);
+
+void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
+void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_reset_traffic_log(struct iwl_priv *priv);
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctxid);
+#else
+static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctxid)
+{
+}
+#endif
+
+#define IWL_CMD(x) case x: return #x
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
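+
+/*
+ * For example, IWL_MASK(1, 3) expands to
+ * (1 << 3) | ((1 << 3) - (1 << 1)) = 0x8 | 0x6 = 0xe, i.e. bits 1..3 set.
+ */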
+
+#define IWL_TRAFFIC_ENTRIES (256)
+#define IWL_TRAFFIC_ENTRY_SIZE (64)
+
+/*****************************************************
+* DRIVER STATUS FUNCTIONS
+******************************************************/
+#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
+/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
+#define STATUS_INT_ENABLED 2
+#define STATUS_RF_KILL_HW 3
+#define STATUS_CT_KILL 4
+#define STATUS_INIT 5
+#define STATUS_ALIVE 6
+#define STATUS_READY 7
+#define STATUS_TEMPERATURE 8
+#define STATUS_GEO_CONFIGURED 9
+#define STATUS_EXIT_PENDING 10
+#define STATUS_STATISTICS 12
+#define STATUS_SCANNING 13
+#define STATUS_SCAN_ABORTING 14
+#define STATUS_SCAN_HW 15
+#define STATUS_POWER_PMI 16
+#define STATUS_FW_ERROR 17
+#define STATUS_DEVICE_ENABLED 18
+#define STATUS_CHANNEL_SWITCH_PENDING 19
+#define STATUS_SCAN_COMPLETE 20
+
+static inline int iwl_is_ready(struct iwl_shared *shrd)
+{
+ /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+ * set but EXIT_PENDING is not */
+ return test_bit(STATUS_READY, &shrd->status) &&
+ test_bit(STATUS_GEO_CONFIGURED, &shrd->status) &&
+ !test_bit(STATUS_EXIT_PENDING, &shrd->status);
+}
+
+static inline int iwl_is_alive(struct iwl_shared *shrd)
+{
+ return test_bit(STATUS_ALIVE, &shrd->status);
+}
+
+static inline int iwl_is_init(struct iwl_shared *shrd)
+{
+ return test_bit(STATUS_INIT, &shrd->status);
+}
+
+static inline int iwl_is_rfkill_hw(struct iwl_shared *shrd)
+{
+ return test_bit(STATUS_RF_KILL_HW, &shrd->status);
+}
+
+static inline int iwl_is_rfkill(struct iwl_shared *shrd)
+{
+ return iwl_is_rfkill_hw(shrd);
+}
+
+static inline int iwl_is_ctkill(struct iwl_shared *shrd)
+{
+ return test_bit(STATUS_CT_KILL, &shrd->status);
+}
+
+static inline int iwl_is_ready_rf(struct iwl_shared *shrd)
+{
+ if (iwl_is_rfkill(shrd))
+ return 0;
+
+ return iwl_is_ready(shrd);
+}
+
+#endif /* __iwl_shared_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
deleted file mode 100644
index 1ef3b7106ad..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ /dev/null
@@ -1,832 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/lockdep.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-trans.h"
-#include "iwl-agn.h"
-
-/* priv->sta_lock must be held */
-static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
-{
-
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
- IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u addr %pM\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_ASSOC(priv,
- "STA id %u addr %pM already present in uCode (according to driver)\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
- } else {
- priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
- }
-}
-
-static int iwl_process_add_sta_resp(struct iwl_priv *priv,
- struct iwl_addsta_cmd *addsta,
- struct iwl_rx_packet *pkt,
- bool sync)
-{
- u8 sta_id = addsta->sta.sta_id;
- unsigned long flags;
- int ret = -EIO;
-
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- return ret;
- }
-
- IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
- sta_id);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- switch (pkt->u.add_sta.status) {
- case ADD_STA_SUCCESS_MSK:
- IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- iwl_sta_ucode_activate(priv, sta_id);
- ret = 0;
- break;
- case ADD_STA_NO_ROOM_IN_TABLE:
- IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
- sta_id);
- break;
- case ADD_STA_NO_BLOCK_ACK_RESOURCE:
- IWL_ERR(priv, "Adding station %d failed, no block ack resource.\n",
- sta_id);
- break;
- case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
- sta_id);
- break;
- default:
- IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
- break;
- }
-
- IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- /*
- * XXX: The MAC address in the command buffer is often changed from
- * the original sent to the device. That is, the MAC address
- * written to the command buffer often is not the same MAC address
- * read from the command buffer when the command returns. This
- * issue has not yet been resolved and this debugging is left to
- * observe the problem.
- */
- IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- addsta->sta.addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-static void iwl_add_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- struct iwl_addsta_cmd *addsta =
- (struct iwl_addsta_cmd *)cmd->cmd.payload;
-
- iwl_process_add_sta_resp(priv, addsta, pkt, false);
-
-}
-
-static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
- u16 size = (u16)sizeof(struct iwl_addsta_cmd);
- struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
- memcpy(addsta, cmd, size);
-	/* reserved in 5000 */
- addsta->rate_n_flags = cpu_to_le16(0);
- return size;
-}
-
-int iwl_send_add_sta(struct iwl_priv *priv,
- struct iwl_addsta_cmd *sta, u8 flags)
-{
- struct iwl_rx_packet *pkt = NULL;
- int ret = 0;
- u8 data[sizeof(*sta)];
- struct iwl_host_cmd cmd = {
- .id = REPLY_ADD_STA,
- .flags = flags,
- .data = { data, },
- };
- u8 sta_id __maybe_unused = sta->sta.sta_id;
-
- IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
- sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
-
- if (flags & CMD_ASYNC)
- cmd.callback = iwl_add_sta_callback;
- else {
- cmd.flags |= CMD_WANT_SKB;
- might_sleep();
- }
-
- cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
- ret = trans_send_cmd(&priv->trans, &cmd);
-
- if (ret || (flags & CMD_ASYNC))
- return ret;
-
- if (ret == 0) {
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- ret = iwl_process_add_sta_resp(priv, sta, pkt, true);
- }
- iwl_free_pages(priv, cmd.reply_page);
-
- return ret;
-}
-
-static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
- struct ieee80211_sta *sta,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
- __le32 sta_flags;
- u8 mimo_ps_mode;
-
- if (!sta || !sta_ht_inf->ht_supported)
- goto done;
-
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
- IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
- "static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
- "dynamic" : "disabled");
-
- sta_flags = priv->stations[index].sta.station_flags;
-
- sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
-
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
- sta_flags |= STA_FLG_MIMO_DIS_MSK;
- break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
- sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
- break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
- break;
- default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
- break;
- }
-
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
-
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
-
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- sta_flags |= STA_FLG_HT40_EN_MSK;
- else
- sta_flags &= ~STA_FLG_HT40_EN_MSK;
-
- priv->stations[index].sta.station_flags = sta_flags;
- done:
- return;
-}
-
-/**
- * iwl_prep_station - Prepare station information for addition
- *
- * should be called with sta_lock held
- */
-u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
-{
- struct iwl_station_entry *station;
- int i;
- u8 sta_id = IWL_INVALID_STATION;
-
- if (is_ap)
- sta_id = ctx->ap_sta_id;
- else if (is_broadcast_ether_addr(addr))
- sta_id = ctx->bcast_sta_id;
- else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
- if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr)) {
- sta_id = i;
- break;
- }
-
- if (!priv->stations[i].used &&
- sta_id == IWL_INVALID_STATION)
- sta_id = i;
- }
-
- /*
- * These two conditions have the same outcome, but keep them
- * separate
- */
- if (unlikely(sta_id == IWL_INVALID_STATION))
- return sta_id;
-
- /*
- * uCode is not able to deal with multiple requests to add a
- * station. Keep track if one is in progress so that we do not send
- * another.
- */
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
- sta_id);
- return sta_id;
- }
-
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
- !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
- IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
- sta_id, addr);
- return sta_id;
- }
-
- station = &priv->stations[sta_id];
- station->used = IWL_STA_DRIVER_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
- sta_id, addr);
- priv->num_stations++;
-
- /* Set up the REPLY_ADD_STA command to send to device */
- memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
- memcpy(station->sta.sta.addr, addr, ETH_ALEN);
- station->sta.mode = 0;
- station->sta.sta.sta_id = sta_id;
- station->sta.station_flags = ctx->station_flags;
- station->ctxid = ctx->ctxid;
-
- if (sta) {
- struct iwl_station_priv_common *sta_priv;
-
- sta_priv = (void *)sta->drv_priv;
- sta_priv->ctx = ctx;
- }
-
- /*
- * OK to call unconditionally, since local stations (IBSS BSSID
- * STA and broadcast STA) pass in a NULL sta, and mac80211
- * doesn't allow HT IBSS.
- */
- iwl_set_ht_add_station(priv, sta_id, sta, ctx);
-
- return sta_id;
-
-}
-
-#define STA_WAIT_TIMEOUT (HZ/2)
-
-/**
- * iwl_add_station_common -
- */
-int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r)
-{
- unsigned long flags_spin;
- int ret = 0;
- u8 sta_id;
- struct iwl_addsta_cmd sta_cmd;
-
- *sta_id_r = 0;
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
- addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EINVAL;
- }
-
- /*
- * uCode is not able to deal with multiple requests to add a
- * station. Keep track if one is in progress so that we do not send
- * another.
- */
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
- sta_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EEXIST;
- }
-
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
- sta_id, addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EEXIST;
- }
-
- priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- /* Add station to device's station table */
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[sta_id].sta.sta.addr);
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- *sta_id_r = sta_id;
- return ret;
-}
-
-/**
- * iwl_sta_ucode_deactivate - deactivate ucode status for a station
- *
- * priv->sta_lock must be held
- */
-static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
-{
- /* Ucode must be active and driver must be non active */
- if ((priv->stations[sta_id].used &
- (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != IWL_STA_UCODE_ACTIVE)
- IWL_ERR(priv, "removed non active STA %u\n", sta_id);
-
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
-
- memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
- IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
-}
-
-static int iwl_send_remove_station(struct iwl_priv *priv,
- const u8 *addr, int sta_id,
- bool temporary)
-{
- struct iwl_rx_packet *pkt;
- int ret;
-
- unsigned long flags_spin;
- struct iwl_rem_sta_cmd rm_sta_cmd;
-
- struct iwl_host_cmd cmd = {
- .id = REPLY_REMOVE_STA,
- .len = { sizeof(struct iwl_rem_sta_cmd), },
- .flags = CMD_SYNC,
- .data = { &rm_sta_cmd, },
- };
-
- memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
- rm_sta_cmd.num_sta = 1;
- memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
-
- cmd.flags |= CMD_WANT_SKB;
-
- ret = trans_send_cmd(&priv->trans, &cmd);
-
- if (ret)
- return ret;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
-
- if (!ret) {
- switch (pkt->u.rem_sta.status) {
- case REM_STA_SUCCESS_MSK:
- if (!temporary) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- iwl_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
- break;
- }
- }
- iwl_free_pages(priv, cmd.reply_page);
-
- return ret;
-}
-
-/**
- * iwl_remove_station - Remove driver's knowledge of station.
- */
-int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
- const u8 *addr)
-{
- unsigned long flags;
-
- if (!iwl_is_ready(priv)) {
- IWL_DEBUG_INFO(priv,
- "Unable to remove station %pM, device not ready.\n",
- addr);
- /*
- * It is typical for stations to be removed when we are
- * going down. Return success since device will be down
- * soon anyway
- */
- return 0;
- }
-
- IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
- sta_id, addr);
-
- if (WARN_ON(sta_id == IWL_INVALID_STATION))
- return -EINVAL;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
- addr);
- goto out_err;
- }
-
- if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
- addr);
- goto out_err;
- }
-
- if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
- kfree(priv->stations[sta_id].lq);
- priv->stations[sta_id].lq = NULL;
- }
-
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
-
- priv->num_stations--;
-
- if (WARN_ON(priv->num_stations < 0))
- priv->num_stations = 0;
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_send_remove_station(priv, addr, sta_id, false);
-out_err:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return -EINVAL;
-}
-
-/**
- * iwl_clear_ucode_stations - clear ucode station table bits
- *
- * This function clears all the bits in the driver indicating
- * which stations are active in the ucode. Call when something
- * other than explicit station management would cause this in
- * the ucode, e.g. unassociated RXON.
- */
-void iwl_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int i;
- unsigned long flags_spin;
- bool cleared = false;
-
- IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx && ctx->ctxid != priv->stations[i].ctxid)
- continue;
-
- if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- cleared = true;
- }
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- if (!cleared)
- IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
-}
-
-/**
- * iwl_restore_stations() - Restore driver known stations to device
- *
- * All stations considered active by the driver, but not present in ucode, are
- * restored.
- *
- * Function sleeps.
- */
-void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- struct iwl_addsta_cmd sta_cmd;
- struct iwl_link_quality_cmd lq;
- unsigned long flags_spin;
- int i;
- bool found = false;
- int ret;
- bool send_lq;
-
- if (!iwl_is_ready(priv)) {
- IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
- return;
- }
-
- IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx->ctxid != priv->stations[i].ctxid)
- continue;
- if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
- !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].sta.mode = 0;
- priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
- found = true;
- }
- }
-
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
- memcpy(&sta_cmd, &priv->stations[i].sta,
- sizeof(struct iwl_addsta_cmd));
- send_lq = false;
- if (priv->stations[i].lq) {
- memcpy(&lq, priv->stations[i].lq,
- sizeof(struct iwl_link_quality_cmd));
- send_lq = true;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- /*
- * Rate scaling has already been initialized, send
- * current LQ command
- */
- if (send_lq)
- iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
- }
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- if (!found)
- IWL_DEBUG_INFO(priv, "Restoring all known stations .... no stations to be restored.\n");
- else
- IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
-}
-
-void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- unsigned long flags;
- int sta_id = ctx->ap_sta_id;
- int ret;
- struct iwl_addsta_cmd sta_cmd;
- struct iwl_link_quality_cmd lq;
- bool active;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return;
- }
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
- sta_cmd.mode = 0;
- memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
-
- active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- if (active) {
- ret = iwl_send_remove_station(
- priv, priv->stations[sta_id].sta.sta.addr,
- sta_id, true);
- if (ret)
- IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
- priv->stations[sta_id].sta.sta.addr, ret);
- }
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret)
- IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
- priv->stations[sta_id].sta.sta.addr, ret);
- iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
-}
-
-int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
-{
- int i;
-
- for (i = 0; i < priv->sta_key_max_num; i++)
- if (!test_and_set_bit(i, &priv->ucode_key_table))
- return i;
-
- return WEP_INVALID_OFFSET;
-}
-
-void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (!(priv->stations[i].used & IWL_STA_BCAST))
- continue;
-
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- priv->num_stations--;
- if (WARN_ON(priv->num_stations < 0))
- priv->num_stations = 0;
- kfree(priv->stations[i].lq);
- priv->stations[i].lq = NULL;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-static void iwl_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
-{
- int i;
- IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
- IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
- lq->general_params.single_stream_ant_msk,
- lq->general_params.dual_stream_ant_msk);
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
- i, lq->rs_table[i].rate_n_flags);
-}
-#else
-static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
-{
-}
-#endif
-
-/**
- * is_lq_table_valid() - Test one aspect of LQ cmd for validity
- *
- * It sometimes happens that, when an HT rate has been in use and we
- * lose connectivity with the AP, mac80211 will first tell us that the
- * current channel is not HT anymore before removing the station. In such a
- * scenario the RXON flags will be updated to indicate we are not
- * communicating HT anymore, but the LQ command may still contain HT rates.
- * Test for this to prevent driver from sending LQ command between the time
- * RXON flags are updated and when LQ command is updated.
- */
-static bool is_lq_table_valid(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq)
-{
- int i;
-
- if (ctx->ht.enabled)
- return true;
-
- IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
- ctx->active.channel);
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
- IWL_DEBUG_INFO(priv,
- "index %d of LQ expects HT channel\n",
- i);
- return false;
- }
- }
- return true;
-}
-
-/**
- * iwl_send_lq_cmd() - Send link quality command
- * @init: This command is sent as part of station initialization right
- * after station has been added.
- *
- * The link quality command is sent as the last step of station creation.
- * This is the special case in which init is set, and we call a callback to
- * clear the state indicating that station creation is in progress.
- */
-int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq, u8 flags, bool init)
-{
- int ret = 0;
- unsigned long flags_spin;
-
- struct iwl_host_cmd cmd = {
- .id = REPLY_TX_LINK_QUALITY_CMD,
- .len = { sizeof(struct iwl_link_quality_cmd), },
- .flags = flags,
- .data = { lq, },
- };
-
- if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
- return -EINVAL;
-
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- iwl_dump_lq_cmd(priv, lq);
- if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
- return -EINVAL;
-
- if (is_lq_table_valid(priv, ctx, lq))
- ret = trans_send_cmd(&priv->trans, &cmd);
- else
- ret = -EINVAL;
-
- if (cmd.flags & CMD_ASYNC)
- return ret;
-
- if (init) {
- IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
- lq->sta_id);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- return ret;
-}
-
-int iwl_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
- int ret;
-
- IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
- sta->addr);
- ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr);
- if (ret)
- IWL_ERR(priv, "Error removing station %pM\n",
- sta->addr);
- mutex_unlock(&priv->mutex);
- return ret;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
deleted file mode 100644
index 9a6768d6685..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#ifndef __iwl_sta_h__
-#define __iwl_sta_h__
-
-#include "iwl-dev.h"
-
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
-#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
- being activated */
-#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
- (this is for the IBSS BSSID stations) */
-#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
-
-
-void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-void iwl_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
-int iwl_get_free_ucode_key_offset(struct iwl_priv *priv);
-int iwl_send_add_sta(struct iwl_priv *priv,
- struct iwl_addsta_cmd *sta, u8 flags);
-int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r);
-int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
- const u8 *addr);
-int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
-u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
-
-int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-
-/**
- * iwl_clear_driver_stations - clear knowledge of all stations from driver
- * @priv: iwl priv struct
- *
- * This is called during iwl_down() so that, if we get there from a
- * hardware restart, mac80211 will be able to reconfigure stations;
- * in the normal down flow the stations will already have been cleared.
- */
-static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rxon_context *ctx;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- memset(priv->stations, 0, sizeof(priv->stations));
- priv->num_stations = 0;
-
- priv->ucode_key_table = 0;
-
- for_each_context(priv, ctx) {
- /*
- * Remove all key information that is not stored as part
- * of station information since mac80211 may not have had
- * a chance to remove all the keys. When device is
- * reconfigured by mac80211 after an error all keys will
- * be reconfigured.
- */
- memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
- ctx->key_mapping_keys = 0;
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static inline int iwl_sta_id(struct ieee80211_sta *sta)
-{
- if (WARN_ON(!sta))
- return IWL_INVALID_STATION;
-
- return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
-}
-
-/**
- * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
- * @priv: iwl priv
- * @context: the current context
- * @sta: mac80211 station
- *
- * In certain circumstances mac80211 passes a station pointer
- * that may be %NULL, for example during TX or key setup. In
- * that case, we need to use the broadcast station, so this
- * inline wraps that pattern.
- */
-static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
- struct iwl_rxon_context *context,
- struct ieee80211_sta *sta)
-{
- int sta_id;
-
- if (!sta)
- return context->bcast_sta_id;
-
- sta_id = iwl_sta_id(sta);
-
- /*
- * mac80211 should not be passing a partially
- * initialised station!
- */
- WARN_ON(sta_id == IWL_INVALID_STATION);
-
- return sta_id;
-}
-#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
index b11f60de4f1..5e50d88f302 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
@@ -63,6 +63,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/dma-mapping.h>
#include <net/net_namespace.h>
#include <linux/netdevice.h>
#include <net/cfg80211.h>
@@ -72,7 +73,6 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
-#include "iwl-fh.h"
#include "iwl-io.h"
#include "iwl-agn.h"
#include "iwl-testmode.h"
@@ -184,7 +184,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
if (priv->testmode_trace.trace_enabled) {
if (priv->testmode_trace.cpu_addr &&
priv->testmode_trace.dma_addr)
- dma_free_coherent(priv->bus->dev,
+ dma_free_coherent(bus(priv)->dev,
priv->testmode_trace.total_size,
priv->testmode_trace.cpu_addr,
priv->testmode_trace.dma_addr);
@@ -239,7 +239,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
" len %d\n", cmd.id, cmd.flags, cmd.len[0]);
/* ok, let's submit the command to ucode */
- return trans_send_cmd(&priv->trans, &cmd);
+ return iwl_trans_send_cmd(trans(priv), &cmd);
}
@@ -277,7 +277,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_REG_READ32:
- val32 = iwl_read32(priv, ofs);
+ val32 = iwl_read32(bus(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@@ -299,7 +299,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
} else {
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
- iwl_write32(priv, ofs, val32);
+ iwl_write32(bus(priv), ofs, val32);
}
break;
case IWL_TM_CMD_APP2DEV_REG_WRITE8:
@@ -309,7 +309,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
} else {
val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
- iwl_write8(priv, ofs, val8);
+ iwl_write8(bus(priv), ofs, val8);
}
break;
default:
@@ -405,7 +405,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
iwl_testmode_cfg_init_calib(priv);
- trans_stop_device(&priv->trans);
+ iwl_trans_stop_device(trans(priv));
break;
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
@@ -484,7 +484,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
struct iwl_priv *priv = hw->priv;
struct sk_buff *skb;
int status = 0;
- struct device *dev = priv->bus->dev;
+ struct device *dev = bus(priv)->dev;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
@@ -613,7 +613,7 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM))
- priv->ucode_owner = owner;
+ priv->shrd->ucode_owner = owner;
else {
IWL_DEBUG_INFO(priv, "Invalid owner\n");
return -EINVAL;
@@ -641,7 +641,7 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
* @data: pointer to user space message
* @len: length in byte of @data
*/
-int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
+int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
struct nlattr *tb[IWL_TM_ATTR_MAX];
struct iwl_priv *priv = hw->priv;
@@ -661,7 +661,7 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
return -ENOMSG;
}
/* in case multiple accesses to the device happens */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_UCODE:
@@ -702,11 +702,11 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
break;
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return result;
}
-int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len)
{
@@ -738,7 +738,7 @@ int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
}
/* in case multiple accesses to the device happens */
- mutex_lock(&priv->mutex);
+ mutex_lock(&priv->shrd->mutex);
switch (cmd) {
case IWL_TM_CMD_APP2DEV_READ_TRACE:
IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
@@ -749,6 +749,6 @@ int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
break;
}
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->shrd->mutex);
return result;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
deleted file mode 100644
index b79330d8418..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#ifndef __iwl_trans_int_pcie_h__
-#define __iwl_trans_int_pcie_h__
-
-/* This file includes the declarations that are internal to the
- * trans_pcie layer */
-
-/*****************************************************
-* RX
-******************************************************/
-void iwl_bg_rx_replenish(struct work_struct *data);
-void iwl_irq_tasklet(struct iwl_priv *priv);
-void iwlagn_rx_replenish(struct iwl_priv *priv);
-void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q);
-
-/*****************************************************
-* ICT
-******************************************************/
-int iwl_reset_ict(struct iwl_priv *priv);
-void iwl_disable_ict(struct iwl_priv *priv);
-int iwl_alloc_isr_ict(struct iwl_priv *priv);
-void iwl_free_isr_ict(struct iwl_priv *priv);
-irqreturn_t iwl_isr_ict(int irq, void *data);
-
-
-/*****************************************************
-* TX / HCMD
-******************************************************/
-void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int index);
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset);
-int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
- int count, int slots_num, u32 id);
-int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags,
- u16 len, const void *data);
-void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
-int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo);
-void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
- int txq_id, u32 index);
-void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry);
-void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
- int frame_limit);
-
-#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
new file mode 100644
index 00000000000..2b6756e8b8f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -0,0 +1,436 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __iwl_trans_int_pcie_h__
+#define __iwl_trans_int_pcie_h__
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+
+#include "iwl-fh.h"
+#include "iwl-csr.h"
+#include "iwl-shared.h"
+#include "iwl-trans.h"
+#include "iwl-debug.h"
+#include "iwl-io.h"
+
+struct iwl_tx_queue;
+struct iwl_queue;
+struct iwl_host_cmd;
+
+/*This file includes the declaration that are internal to the
+ * trans_pcie layer */
+
+/**
+ * struct isr_statistics - interrupt statistics
+ *
+ * Counters for the different interrupt causes (hw/sw errors, scheduler,
+ * alive, rfkill, ctkill, wakeup, rx, tx, unhandled) plus the last uCode
+ * error code seen (@err_code).
+ */
+struct isr_statistics {
+ u32 hw;
+ u32 sw;
+ u32 err_code;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 tx;
+ u32 unhandled;
+};
+
+/**
+ * struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool: initial pool of iwl_rx_mem_buffer structures for this queue
+ * @queue: RBD-indexed array of pointers into the pool
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @write_actual: last write value actually handed to the device (multiple of 8)
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ * @lock: protects the queue indexes and the rx_free/rx_used lists
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rx_queue {
+ __le32 *bd;
+ dma_addr_t bd_dma;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 write_actual;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ int need_update;
+ struct iwl_rb_status *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+};
+
+struct iwl_dma_ptr {
+ dma_addr_t dma;
+ void *addr;
+ size_t size;
+};
+
+/*
+ * This queue number is required for proper operation
+ * because the ucode will stop/start the scheduler as
+ * required.
+ */
+#define IWL_IPAN_MCAST_QUEUE 8
+
+struct iwl_cmd_meta {
+ /* only for SYNC commands, iff the reply skb is wanted */
+ struct iwl_host_cmd *source;
+
+ u32 flags;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues.
+ *
+ * Note the difference between n_bd and n_window: the hardware
+ * always assumes 256 descriptors, so n_bd is always 256 (unless
+ * the HW changes in the future). For the normal TX
+ * queues, n_window, which is the size of the software queue data,
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
+ * the software buffers (such as @meta and @skbs in struct
+ * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
+ * in the same struct) have 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlaid on the HW queue.
+ */
+struct iwl_queue {
+ int n_bd; /* number of BDs in this queue */
+ int write_ptr; /* 1-st empty entry (index) host_w*/
+ int read_ptr; /* last used entry (index) host_r*/
+ /* use for monitoring and recovering the stuck queue */
+ dma_addr_t dma_addr; /* physical addr for BD's */
+ int n_window; /* safe queue window */
+ u32 id;
+ int low_mark; /* low watermark, resume queue if free
+ * space more than this */
+ int high_mark; /* high watermark, stop queue if free
+ * space less than this */
+};
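
The window overlay described above is easiest to see with concrete numbers. The following stand-alone sketch (editorial illustration, not part of this patch) maps a hypothetical 256-entry hardware ring onto a 32-entry software command window by masking with n_window - 1, the same arithmetic get_cmd_index() uses later in this header; all names and constants here are illustrative only.

#include <assert.h>
#include <stdio.h>

#define N_BD      256   /* HW always uses 256 buffer descriptors */
#define N_WINDOW   32   /* SW window for the command queue */

/* Map a hardware ring index into the software window (N_WINDOW must be
 * a power of two for the mask to work). */
static int hw_index_to_sw(int hw_index)
{
	return hw_index & (N_WINDOW - 1);
}

int main(void)
{
	/* HW entries 0, 32, 64, ... all land on SW entry 0 */
	assert(hw_index_to_sw(0) == 0);
	assert(hw_index_to_sw(32) == 0);
	assert(hw_index_to_sw(255) == 31);
	printf("HW index 200 -> SW slot %d\n", hw_index_to_sw(200));
	return 0;
}
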
+
+/**
+ * struct iwl_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @tfds: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @skbs: array of per-TFD socket buffer pointers
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write index
+ * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
+ * @sta_id: valid if sched_retry is set
+ * @tid: valid if sched_retry is set
+ *
+ * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct iwl_tx_queue {
+ struct iwl_queue q;
+ struct iwl_tfd *tfds;
+ struct iwl_device_cmd **cmd;
+ struct iwl_cmd_meta *meta;
+ struct sk_buff **skbs;
+ unsigned long time_stamp;
+ u8 need_update;
+ u8 sched_retry;
+ u8 active;
+ u8 swq_id;
+
+ u16 sta_id;
+ u16 tid;
+};
+
+/**
+ * struct iwl_trans_pcie - PCIe transport specific data
+ * @rxq: all the RX queue data
+ * @rx_replenish: work that will be called when buffers need to be allocated
+ * @trans: pointer to the generic transport area
+ * @scd_base_addr: scheduler sram base address in SRAM
+ * @scd_bc_tbls: pointer to the byte count table of the scheduler
+ * @kw: keep warm address
+ * @ac_to_fifo: to what fifo is a specific AC mapped?
+ * @ac_to_queue: to what tx queue is a specific AC mapped?
+ * @mcast_queue: multicast queue number for each RXON context
+ * @txq: Tx DMA processing queues
+ * @txq_ctx_active_msk: bitmask of which tx queues are active
+ * @queue_stopped: tracks which HW queues are stopped
+ * @queue_stop_count: per-AC count of stopped HW queues
+ */
+struct iwl_trans_pcie {
+ struct iwl_rx_queue rxq;
+ struct work_struct rx_replenish;
+ struct iwl_trans *trans;
+
+ /* INT ICT Table */
+ __le32 *ict_tbl;
+ void *ict_tbl_vir;
+ dma_addr_t ict_tbl_dma;
+ dma_addr_t aligned_ict_tbl_dma;
+ int ict_index;
+ u32 inta;
+ bool use_ict;
+ struct tasklet_struct irq_tasklet;
+ struct isr_statistics isr_stats;
+
+ u32 inta_mask;
+ u32 scd_base_addr;
+ struct iwl_dma_ptr scd_bc_tbls;
+ struct iwl_dma_ptr kw;
+
+ const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
+ const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
+ u8 mcast_queue[NUM_IWL_RXON_CTX];
+
+ struct iwl_tx_queue *txq;
+ unsigned long txq_ctx_active_msk;
+#define IWL_MAX_HW_QUEUES 32
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+ atomic_t queue_stop_count[4];
+};
+
+#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
+ ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
+
+/*****************************************************
+* RX
+******************************************************/
+void iwl_bg_rx_replenish(struct work_struct *data);
+void iwl_irq_tasklet(struct iwl_trans *trans);
+void iwlagn_rx_replenish(struct iwl_trans *trans);
+void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
+ struct iwl_rx_queue *q);
+
+/*****************************************************
+* ICT
+******************************************************/
+int iwl_reset_ict(struct iwl_trans *trans);
+void iwl_disable_ict(struct iwl_trans *trans);
+int iwl_alloc_isr_ict(struct iwl_trans *trans);
+void iwl_free_isr_ict(struct iwl_trans *trans);
+irqreturn_t iwl_isr_ict(int irq, void *data);
+
+/*****************************************************
+* TX / HCMD
+******************************************************/
+void iwl_txq_update_write_ptr(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq);
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset);
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
+int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_tx_cmd_complete(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer *rxb, int handler_status);
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt);
+void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid);
+void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
+void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid, u16 *ssn);
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx,
+ int sta_id, int tid, int frame_limit);
+void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ int index, enum dma_data_direction dma_dir);
+int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+ struct sk_buff_head *skbs);
+int iwl_queue_space(const struct iwl_queue *q);
+
+/*****************************************************
+* Error handling
+******************************************************/
+int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
+ char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+void iwl_dump_csr(struct iwl_trans *trans);
+
+/*****************************************************
+* Helpers
+******************************************************/
+static inline void iwl_disable_interrupts(struct iwl_trans *trans)
+{
+ clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
+
+ /* disable interrupts from uCode/NIC to host */
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ iwl_write32(bus(trans), CSR_INT, 0xffffffff);
+ iwl_write32(bus(trans), CSR_FH_INT_STATUS, 0xffffffff);
+ IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
+}
+
+static inline void iwl_enable_interrupts(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
+ set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
+ iwl_write32(bus(trans), CSR_INT_MASK, trans_pcie->inta_mask);
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
+{
+ BUG_ON(ac > 3); /* only have 2 bits */
+ BUG_ON(hwq > 31); /* only use 5 bits */
+
+ txq->swq_id = (hwq << 2) | ac;
+}
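
As a stand-alone illustration of the 8-bit swq_id encoding drawn above (editorial sketch, not part of the patch; the values are arbitrary), this packs an AC and a HW queue number the way iwl_set_swq_id() does and decodes them the way iwl_wake_queue()/iwl_stop_queue() below do.

#include <assert.h>
#include <stdint.h>

/* Pack: bits 0-1 = AC (0-3), bits 2-6 = HW queue (0-31), bit 7 unused */
static uint8_t pack_swq_id(uint8_t ac, uint8_t hwq)
{
	return (uint8_t)((hwq << 2) | ac);
}

int main(void)
{
	uint8_t swq = pack_swq_id(2 /* an AC */, 10 /* a HW queue */);
	uint8_t ac  = swq & 3;
	uint8_t hwq = (swq >> 2) & 0x1f;

	assert(ac == 2 && hwq == 10);
	return 0;
}
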
+
+static inline void iwl_wake_queue(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
+ if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
+ iwl_wake_sw_queue(priv(trans), ac);
+}
+
+static inline void iwl_stop_queue(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
+ if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
+ iwl_stop_sw_queue(priv(trans), ac);
+}
+
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
+ int txq_id)
+{
+ set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
+}
+
+static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
+ int txq_id)
+{
+ clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
+}
+
+static inline int iwl_queue_used(const struct iwl_queue *q, int i)
+{
+ return q->write_ptr >= q->read_ptr ?
+ (i >= q->read_ptr && i < q->write_ptr) :
+ !(i < q->read_ptr && i >= q->write_ptr);
+}
+
+static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
+{
+ return index & (q->n_window - 1);
+}
+
+#define IWL_TX_FIFO_BK 0 /* shared */
+#define IWL_TX_FIFO_BE 1
+#define IWL_TX_FIFO_VI 2 /* shared */
+#define IWL_TX_FIFO_VO 3
+#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN 4
+#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN 5
+/* re-uses the VO FIFO, uCode will properly flush/schedule */
+#define IWL_TX_FIFO_AUX 5
+#define IWL_TX_FIFO_UNUSED -1
+
+/* AUX (TX during scan dwell) queue */
+#define IWL_AUX_QUEUE 10
+
+#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
new file mode 100644
index 00000000000..374c68cc1d7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -0,0 +1,1435 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/gfp.h>
+
+/* TODO: remove the include of iwl-core.h */
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-trans-pcie-int.h"
+
+/******************************************************************************
+ *
+ * RX path functions
+ *
+ ******************************************************************************/
+
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot in which the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt. The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ INDEX is updated (updating the
+ * 'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' index is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_rx_queue_alloc() Allocates rx_free
+ * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * iwl_rx_queue_restock
+ * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE index. If insufficient rx_free buffers
+ * are available, schedules iwl_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
+ * READ INDEX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls iwl_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
+
+/**
+ * iwl_rx_queue_space - Return number of free slots available in queue.
+ */
+static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+{
+ int s = q->read - q->write;
+ if (s <= 0)
+ s += RX_QUEUE_SIZE;
+ /* keep some buffer to not confuse full and empty queue */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
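
To make the read/write index convention from the "Rx theory of operation" comment concrete, here is a small stand-alone model of the computation iwl_rx_queue_space() performs (editorial sketch with arbitrary index values, not part of the patch): two slots are always held back so that a full ring is not confused with an empty one.

#include <stdio.h>

#define RX_QUEUE_SIZE 256  /* same size as the real Rx ring */

/* Free slots the driver may still restock, mirroring iwl_rx_queue_space():
 * distance from 'write' back around to 'read', minus the two reserved slots. */
static int rx_space(int read, int write)
{
	int s = read - write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	s -= 2;
	return s < 0 ? 0 : s;
}

int main(void)
{
	printf("read=10  write=10  -> %d slots\n", rx_space(10, 10));  /* 254 */
	printf("read=12  write=10  -> %d slots\n", rx_space(12, 10));  /* 0   */
	printf("read=0   write=200 -> %d slots\n", rx_space(0, 200));  /* 54  */
	return 0;
}
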
+
+/**
+ * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
+ struct iwl_rx_queue *q)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ if (q->need_update == 0)
+ goto exit_unlock;
+
+ if (hw_params(trans).shadow_reg_enable) {
+ /* shadow register enabled */
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ iwl_write32(bus(trans), FH_RSCSR_CHNL0_WPTR, q->write_actual);
+ } else {
+ /* If power-saving is in use, make sure device is awake */
+ if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
+ reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans,
+ "Rx queue requesting wakeup,"
+ " GP1 = 0x%x\n", reg);
+ iwl_set_bit(bus(trans), CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
+ }
+
+ q->write_actual = (q->write & ~0x7);
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
+ q->write_actual);
+
+ /* Else device is assumed to be awake */
+ } else {
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
+ q->write_actual);
+ }
+ }
+ q->need_update = 0;
+
+ exit_unlock:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+/**
+ * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
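
A short worked example of the conversion above (editorial sketch with a made-up address, not part of the patch): since the receive buffer's DMA address must fit in 36 bits and be 256-byte aligned (see the BUG_ON checks in iwlagn_rx_allocate() below), shifting it right by 8 drops only the zero alignment bits and the result fits the 32-bit RBD.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical 36-bit, 256-byte-aligned DMA address */
	uint64_t dma_addr = 0x123456700ULL;

	assert((dma_addr & ~((1ULL << 36) - 1)) == 0); /* fits in 36 bits */
	assert((dma_addr & 0xff) == 0);                /* 256-byte aligned */

	uint32_t rbd = (uint32_t)(dma_addr >> 8);      /* what the RBD stores */
	printf("dma 0x%llx -> rbd 0x%x\n",
	       (unsigned long long)dma_addr, rbd);     /* prints 0x1234567 */
	return 0;
}
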
+
+/**
+ * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(trans->shrd->workqueue, &trans_pcie->rx_replenish);
+
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ iwl_rx_queue_update_write_ptr(trans, rxq);
+ }
+}
+
+/**
+ * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization)
+ */
+static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (hw_params(trans).rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask,
+ hw_params(trans).rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(trans, "alloc_pages failed, "
+ "order: %d\n",
+ hw_params(trans).rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+				IWL_CRIT(trans, "Failed to alloc_pages with %s. "
+					"Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ?
+ "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, hw_params(trans).rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void iwlagn_rx_replenish(struct iwl_trans *trans)
+{
+ unsigned long flags;
+
+ iwlagn_rx_allocate(trans, GFP_KERNEL);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwlagn_rx_queue_restock(trans);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+}
+
+static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
+{
+ iwlagn_rx_allocate(trans, GFP_ATOMIC);
+
+ iwlagn_rx_queue_restock(trans);
+}
+
+void iwl_bg_rx_replenish(struct work_struct *data)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ container_of(data, struct iwl_trans_pcie, rx_replenish);
+ struct iwl_trans *trans = trans_pcie->trans;
+
+ if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
+ return;
+
+ mutex_lock(&trans->shrd->mutex);
+ iwlagn_rx_replenish(trans);
+ mutex_unlock(&trans->shrd->mutex);
+}
+
+/**
+ * iwl_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the priv->rx_handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void iwl_rx_handle(struct iwl_trans *trans)
+{
+ struct iwl_rx_mem_buffer *rxb;
+ struct iwl_rx_packet *pkt;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
+ struct iwl_device_cmd *cmd;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
+ int index, cmd_index;
+
+ /* uCode's read index (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);
+
+ /* calculate total frames need to be restock after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
+ while (i != r) {
+ int len, err;
+ u16 sequence;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ if (WARN_ON(rxb == NULL)) {
+ i = (i + 1) & RX_QUEUE_MASK;
+ continue;
+ }
+
+ rxq->queue[i] = NULL;
+
+ dma_unmap_page(bus(trans)->dev, rxb->page_dma,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+ pkt = rxb_addr(rxb);
+
+ IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
+ i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_dev_rx(priv(trans), pkt, len);
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
+ (pkt->hdr.cmd != REPLY_RX) &&
+ (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
+ (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
+ (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
+ (pkt->hdr.cmd != REPLY_TX);
+
+ sequence = le16_to_cpu(pkt->hdr.sequence);
+ index = SEQ_TO_INDEX(sequence);
+ cmd_index = get_cmd_index(&txq->q, index);
+
+ if (reclaim)
+ cmd = txq->cmd[cmd_index];
+ else
+ cmd = NULL;
+
+		/* warn if this is a cmd response or notification that we
+		 * treat as uCode-originated but for which the uCode didn't
+		 * set SEQ_RX_FRAME.
+		 * If you see this code after the second half of 2012, then
+		 * please remove it.
+		 */
+ WARN(pkt->hdr.cmd != REPLY_TX && reclaim == false &&
+ (!(pkt->hdr.sequence & SEQ_RX_FRAME)),
+ "reclaim is false, SEQ_RX_FRAME unset: %s\n",
+ get_cmd_string(pkt->hdr.cmd));
+
+ err = iwl_rx_dispatch(priv(trans), rxb, cmd);
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some rx_handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking
+ * iwl_trans_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ iwl_tx_cmd_complete(trans, rxb, err);
+ else
+ IWL_WARN(trans, "Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
+ 0, PAGE_SIZE <<
+ hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+		 * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ iwlagn_rx_replenish_now(trans);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ iwlagn_rx_replenish_now(trans);
+ else
+ iwlagn_rx_queue_restock(trans);
+}
+
+static const char * const desc_lookup_text[] = {
+ "OK",
+ "FAIL",
+ "BAD_PARAM",
+ "BAD_CHECKSUM",
+ "NMI_INTERRUPT_WDG",
+ "SYSASSERT",
+ "FATAL_ERROR",
+ "BAD_COMMAND",
+ "HW_ERROR_TUNE_LOCK",
+ "HW_ERROR_TEMPERATURE",
+ "ILLEGAL_CHAN_FREQ",
+ "VCC_NOT_STABLE",
+ "FH_ERROR",
+ "NMI_INTERRUPT_HOST",
+ "NMI_INTERRUPT_ACTION_PT",
+ "NMI_INTERRUPT_UNKNOWN",
+ "UCODE_VERSION_MISMATCH",
+ "HW_ERROR_ABS_LOCK",
+ "HW_ERROR_CAL_LOCK_FAIL",
+ "NMI_INTERRUPT_INST_ACTION_PT",
+ "NMI_INTERRUPT_DATA_ACTION_PT",
+ "NMI_TRM_HW_ER",
+ "NMI_INTERRUPT_TRM",
+ "NMI_INTERRUPT_BREAK_POINT",
+ "DEBUG_0",
+ "DEBUG_1",
+ "DEBUG_2",
+ "DEBUG_3",
+};
+
+static struct { char *name; u8 num; } advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *desc_lookup(u32 num)
+{
+ int i;
+ int max = ARRAY_SIZE(desc_lookup_text);
+
+ if (num < max)
+ return desc_lookup_text[num];
+
+ max = ARRAY_SIZE(advanced_lookup) - 1;
+ for (i = 0; i < max; i++) {
+ if (advanced_lookup[i].num == num)
+ break;
+ }
+ return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+static void iwl_dump_nic_error_log(struct iwl_trans *trans)
+{
+ u32 base;
+ struct iwl_error_event_table table;
+ struct iwl_priv *priv = priv(trans);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ base = priv->device_pointers.error_event_table;
+ if (priv->ucode_type == IWL_UCODE_INIT) {
+ if (!base)
+ base = priv->init_errlog_ptr;
+ } else {
+ if (!base)
+ base = priv->inst_errlog_ptr;
+ }
+
+ if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+ IWL_ERR(trans,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (priv->ucode_type == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ iwl_read_targ_mem_words(bus(priv), base, &table, sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ trans->shrd->status, table.valid);
+ }
+
+ trans_pcie->isr_stats.err_code = table.error_id;
+
+ trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
+ table.data1, table.data2, table.line,
+ table.blink1, table.blink2, table.ilink1,
+ table.ilink2, table.bcon_time, table.gp1,
+ table.gp2, table.gp3, table.ucode_ver,
+ table.hw_ver, table.brd_ver);
+ IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(trans, "0x%08X | uPc\n", table.pc);
+ IWL_ERR(trans, "0x%08X | branchlink1\n", table.blink1);
+ IWL_ERR(trans, "0x%08X | branchlink2\n", table.blink2);
+ IWL_ERR(trans, "0x%08X | interruptlink1\n", table.ilink1);
+ IWL_ERR(trans, "0x%08X | interruptlink2\n", table.ilink2);
+ IWL_ERR(trans, "0x%08X | data1\n", table.data1);
+ IWL_ERR(trans, "0x%08X | data2\n", table.data2);
+ IWL_ERR(trans, "0x%08X | line\n", table.line);
+ IWL_ERR(trans, "0x%08X | beacon time\n", table.bcon_time);
+ IWL_ERR(trans, "0x%08X | tsf low\n", table.tsf_low);
+ IWL_ERR(trans, "0x%08X | tsf hi\n", table.tsf_hi);
+ IWL_ERR(trans, "0x%08X | time gp1\n", table.gp1);
+ IWL_ERR(trans, "0x%08X | time gp2\n", table.gp2);
+ IWL_ERR(trans, "0x%08X | time gp3\n", table.gp3);
+ IWL_ERR(trans, "0x%08X | uCode version\n", table.ucode_ver);
+ IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
+ IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
+ IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
+}
+
+/**
+ * iwl_irq_handle_error - called for HW or SW error interrupt from card
+ */
+static void iwl_irq_handle_error(struct iwl_trans *trans)
+{
+ struct iwl_priv *priv = priv(trans);
+ /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
+ if (priv->cfg->internal_wimax_coex &&
+ (!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) &
+ APMS_CLK_VAL_MRB_FUNC_MODE) ||
+ (iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) &
+ APMG_PS_CTRL_VAL_RESET_REQ))) {
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the ready bit.
+ */
+ clear_bit(STATUS_READY, &trans->shrd->status);
+ clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+ wake_up(&priv->shrd->wait_command_queue);
+ IWL_ERR(trans, "RF is used by WiMAX\n");
+ return;
+ }
+
+ IWL_ERR(trans, "Loaded firmware version: %s\n",
+ priv->hw->wiphy->fw_version);
+
+ iwl_dump_nic_error_log(trans);
+ iwl_dump_csr(trans);
+ iwl_dump_fh(trans, NULL, false);
+ iwl_dump_nic_event_log(trans, false, NULL, false);
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS)
+ iwl_print_rx_config_cmd(priv(trans), IWL_RXON_CTX_BSS);
+#endif
+
+ iwlagn_fw_error(priv, false);
+}
+
+#define EVENT_START_OFFSET (4 * sizeof(u32))
+
+/**
+ * iwl_print_event_log - Dump error event log to syslog
+ *
+ */
+static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
+{
+ u32 i;
+ u32 base; /* SRAM byte address of event log header */
+ u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+ unsigned long reg_flags;
+ struct iwl_priv *priv = priv(trans);
+
+ if (num_events == 0)
+ return pos;
+
+ base = priv->device_pointers.log_event_table;
+ if (priv->ucode_type == IWL_UCODE_INIT) {
+ if (!base)
+ base = priv->init_evtlog_ptr;
+ } else {
+ if (!base)
+ base = priv->inst_evtlog_ptr;
+ }
+
+ if (mode == 0)
+ event_size = 2 * sizeof(u32);
+ else
+ event_size = 3 * sizeof(u32);
+
+ ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+ /* Make sure device is powered up for SRAM reads */
+ spin_lock_irqsave(&bus(trans)->reg_lock, reg_flags);
+ iwl_grab_nic_access(bus(trans));
+
+ /* Set starting address; reads will auto-increment */
+ iwl_write32(bus(trans), HBUS_TARG_MEM_RADDR, ptr);
+ rmb();
+
+ /* "time" is actually "data" for mode 0 (no timestamp).
+ * place event id # at far right for easier visual parsing. */
+ for (i = 0; i < num_events; i++) {
+ ev = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
+ time = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
+ if (mode == 0) {
+ /* data, ev */
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ } else {
+ trace_iwlwifi_dev_ucode_event(priv, 0,
+ time, ev);
+ IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ }
+ } else {
+ data = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ trace_iwlwifi_dev_ucode_event(priv, time,
+ data, ev);
+ }
+ }
+ }
+
+ /* Allow device to power down */
+ iwl_release_nic_access(bus(trans));
+ spin_unlock_irqrestore(&bus(trans)->reg_lock, reg_flags);
+ return pos;
+}
+
+/**
+ * iwl_print_last_event_logs - Dump the newest # of event log to syslog
+ */
+static int iwl_print_last_event_logs(struct iwl_trans *trans, u32 capacity,
+ u32 num_wraps, u32 next_entry,
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
+{
+ /*
+	 * display the newest DEFAULT_DUMP_EVENT_LOG_ENTRIES entries,
+	 * i.e. the entries just before the next one that uCode would fill.
+ */
+ if (num_wraps) {
+ if (next_entry < size) {
+ pos = iwl_print_event_log(trans,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl_print_event_log(trans, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
+ } else
+ pos = iwl_print_event_log(trans, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ } else {
+ if (next_entry < size) {
+ pos = iwl_print_event_log(trans, 0, next_entry,
+ mode, pos, buf, bufsz);
+ } else {
+ pos = iwl_print_event_log(trans, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ }
+ }
+ return pos;
+}
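
The range selection above is easier to follow with numbers. This stand-alone sketch (editorial illustration, not part of the patch; capacity and index values are arbitrary) prints which ranges of a circular event log would be dumped, mirroring the branches of iwl_print_last_event_logs(): when the log has wrapped and fewer than 'size' entries lie before next_entry, the tail of the buffer is printed first, then the head.

#include <stdio.h>

/* Print which ranges of a circular event log of 'capacity' entries would
 * be dumped, mimicking the branch structure of iwl_print_last_event_logs(). */
static void pick_ranges(unsigned capacity, unsigned num_wraps,
			unsigned next_entry, unsigned size)
{
	if (num_wraps && next_entry < size)
		printf("dump [%u..%u) then [0..%u)\n",
		       capacity - (size - next_entry), capacity, next_entry);
	else if (next_entry < size)
		printf("dump [0..%u)\n", next_entry);
	else
		printf("dump [%u..%u)\n", next_entry - size, next_entry);
}

int main(void)
{
	pick_ranges(512, 1, 5, 20);   /* wrapped: tail [497..512) then [0..5) */
	pick_ranges(512, 0, 5, 20);   /* not wrapped yet: just [0..5) */
	pick_ranges(512, 3, 100, 20); /* plenty before next_entry: [80..100) */
	return 0;
}
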
+
+#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
+
+int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
+ char **buf, bool display)
+{
+ u32 base; /* SRAM byte address of event log header */
+ u32 capacity; /* event log capacity in # entries */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+ u32 size; /* # entries that we'll print */
+ u32 logsize;
+ int pos = 0;
+ size_t bufsz = 0;
+ struct iwl_priv *priv = priv(trans);
+
+ base = priv->device_pointers.log_event_table;
+ if (priv->ucode_type == IWL_UCODE_INIT) {
+ logsize = priv->init_evtlog_size;
+ if (!base)
+ base = priv->init_evtlog_ptr;
+ } else {
+ logsize = priv->inst_evtlog_size;
+ if (!base)
+ base = priv->inst_evtlog_ptr;
+ }
+
+ if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+ IWL_ERR(trans,
+ "Invalid event log pointer 0x%08X for %s uCode\n",
+ base,
+ (priv->ucode_type == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return -EINVAL;
+ }
+
+ /* event log header */
+ capacity = iwl_read_targ_mem(bus(trans), base);
+ mode = iwl_read_targ_mem(bus(trans), base + (1 * sizeof(u32)));
+ num_wraps = iwl_read_targ_mem(bus(trans), base + (2 * sizeof(u32)));
+ next_entry = iwl_read_targ_mem(bus(trans), base + (3 * sizeof(u32)));
+
+ if (capacity > logsize) {
+ IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
+ "entries\n", capacity, logsize);
+ capacity = logsize;
+ }
+
+ if (next_entry > logsize) {
+ IWL_ERR(trans, "Log write index %d is bogus, limit to %d\n",
+ next_entry, logsize);
+ next_entry = logsize;
+ }
+
+ size = num_wraps ? capacity : next_entry;
+
+ /* bail out if nothing in log */
+ if (size == 0) {
+ IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
+ return pos;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (!(iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) && !full_log)
+ size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+ ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+ size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+ ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+ IWL_ERR(trans, "Start IWL Event Log Dump: display last %u entries\n",
+ size);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ }
+ if ((iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) || full_log) {
+ /*
+ * if uCode has wrapped back to top of log,
+ * start at the oldest entry,
+		 * i.e. the next one that uCode would fill.
+ */
+ if (num_wraps)
+ pos = iwl_print_event_log(trans, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
+ /* (then/else) start at top of log */
+ pos = iwl_print_event_log(trans, 0,
+ next_entry, mode, pos, buf, bufsz);
+ } else
+ pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
+#else
+ pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
+#endif
+ return pos;
+}
+
+/* tasklet for iwlagn interrupt */
+void iwl_irq_tasklet(struct iwl_trans *trans)
+{
+ u32 inta = 0;
+ u32 handled = 0;
+ unsigned long flags;
+ u32 i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_mask;
+#endif
+
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ */
+ /* There is a hardware bug in the interrupt mask function that some
+ * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
+ * they are disabled in the CSR_INT_MASK register. Furthermore the
+ * ICT interrupt handling mechanism has another bug that might cause
+	 * these unmasked interrupts to fail to be detected. We work around the
+ * hardware bugs here by ACKing all the possible interrupts so that
+ * interrupt coalescing can still be achieved.
+ */
+ iwl_write32(bus(trans), CSR_INT,
+ trans_pcie->inta | ~trans_pcie->inta_mask);
+
+ inta = trans_pcie->inta;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(trans->shrd) & IWL_DL_ISR) {
+ /* just for debug */
+ inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
+ IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
+ inta, inta_mask);
+ }
+#endif
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ /* the interrupt is saved in the inta variable; now we can reset trans_pcie->inta */
+ trans_pcie->inta = 0;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IWL_ERR(trans, "Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ iwl_disable_interrupts(trans);
+
+ isr_stats->hw++;
+ iwl_irq_handle_error(trans);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
+ "the frame/frames.\n");
+ isr_stats->sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+ isr_stats->alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ int hw_rf_kill = 0;
+ if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rf_kill = 1;
+
+ IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
+ hw_rf_kill ? "disable radio" : "enable radio");
+
+ isr_stats->rfkill++;
+
+ /* The driver only loads the uCode once the interface is set up,
+ * and it allows loading the uCode even if the radio is killed.
+ * Hence, update the killswitch state here; the rfkill handler
+ * will take care of restarting if needed.
+ */
+ if (!test_bit(STATUS_ALIVE, &trans->shrd->status)) {
+ if (hw_rf_kill)
+ set_bit(STATUS_RF_KILL_HW,
+ &trans->shrd->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW,
+ &trans->shrd->status);
+ iwl_set_hw_rfkill_state(priv(trans), hw_rf_kill);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IWL_ERR(trans, "Microcode CT kill error detected.\n");
+ isr_stats->ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IWL_ERR(trans, "Microcode SW error detected. "
+ " Restarting 0x%X.\n", inta);
+ isr_stats->sw++;
+ iwl_irq_handle_error(trans);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+ iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
+ for (i = 0; i < hw_params(trans).max_txq_num; i++)
+ iwl_txq_update_write_ptr(trans,
+ &trans_pcie->txq[i]);
+
+ isr_stats->wakeup++;
+
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here. */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
+ CSR_INT_BIT_RX_PERIODIC)) {
+ IWL_DEBUG_ISR(trans, "Rx interrupt\n");
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ iwl_write32(bus(trans), CSR_FH_INT_STATUS,
+ CSR_FH_INT_RX_MASK);
+ }
+ if (inta & CSR_INT_BIT_RX_PERIODIC) {
+ handled |= CSR_INT_BIT_RX_PERIODIC;
+ iwl_write32(bus(trans),
+ CSR_INT, CSR_INT_BIT_RX_PERIODIC);
+ }
+ /* Sending an RX interrupt requires many steps to be done in the
+ * device:
+ * 1- write interrupt to current index in ICT table.
+ * 2- dma RX frame.
+ * 3- update RX shared data to indicate last write index.
+ * 4- send interrupt.
+ * This could lead to an RX race: the driver could receive an RX
+ * interrupt before the shared data changes reflect it; the
+ * periodic interrupt will detect any dangling Rx activity.
+ */
+
+ /* Disable periodic interrupt; we use it as just a one-shot. */
+ iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_DIS);
+ iwl_rx_handle(trans);
+
+ /*
+ * Enable periodic interrupt in 8 msec only if we received
+ * real RX interrupt (instead of just periodic int), to catch
+ * any dangling Rx interrupt. If it was just the periodic
+ * interrupt, there was no dangling Rx activity, and no need
+ * to extend the periodic interrupt; one-shot is enough.
+ */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
+ iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_ENA);
+
+ isr_stats->rx++;
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ iwl_write32(bus(trans), CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
+ IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+ isr_stats->tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ priv(trans)->ucode_write_complete = 1;
+ wake_up(&trans->shrd->wait_command_queue);
+ }
+
+ if (inta & ~handled) {
+ IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ isr_stats->unhandled++;
+ }
+
+ if (inta & ~(trans_pcie->inta_mask)) {
+ IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
+ inta & ~trans_pcie->inta_mask);
+ }
+
+ /* Re-enable all interrupts */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status))
+ iwl_enable_interrupts(trans);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(priv(trans));
+}
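As an aside, the "handled" bookkeeping used by the tasklet above reduces to the following standalone sketch; the bit values and names below are illustrative, not the driver's CSR definitions:

#include <stdint.h>
#include <stdio.h>

#define INT_BIT_HW_ERR  (1u << 29)  /* illustrative bit positions */
#define INT_BIT_RF_KILL (1u << 7)
#define INT_BIT_WAKEUP  (1u << 1)

static void service_interrupts(uint32_t inta)
{
	uint32_t handled = 0;

	if (inta & INT_BIT_HW_ERR) {
		/* fatal: claim the bit and bail out, restart path takes over */
		handled |= INT_BIT_HW_ERR;
		return;
	}
	if (inta & INT_BIT_RF_KILL)
		handled |= INT_BIT_RF_KILL;	/* update rfkill state here */
	if (inta & INT_BIT_WAKEUP)
		handled |= INT_BIT_WAKEUP;	/* push write pointers here */

	/* anything not claimed above is a surprise worth logging */
	if (inta & ~handled)
		printf("Unhandled INTA bits 0x%08x\n", inta & ~handled);
}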
+
+/******************************************************************************
+ *
+ * ICT functions
+ *
+ ******************************************************************************/
+#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* Free dram table */
+void iwl_free_isr_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans_pcie->ict_tbl_vir) {
+ dma_free_coherent(bus(trans)->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ trans_pcie->ict_tbl_vir,
+ trans_pcie->ict_tbl_dma);
+ trans_pcie->ict_tbl_vir = NULL;
+ memset(&trans_pcie->ict_tbl_dma, 0,
+ sizeof(trans_pcie->ict_tbl_dma));
+ memset(&trans_pcie->aligned_ict_tbl_dma, 0,
+ sizeof(trans_pcie->aligned_ict_tbl_dma));
+ }
+}
+
+
+/* Allocate the DRAM shared table; it must be PAGE_SIZE aligned.
+ * Also reset all data related to the ICT table interrupt.
+ */
+int iwl_alloc_isr_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* allocate shared data table */
+ trans_pcie->ict_tbl_vir =
+ dma_alloc_coherent(bus(trans)->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ &trans_pcie->ict_tbl_dma, GFP_KERNEL);
+ if (!trans_pcie->ict_tbl_vir)
+ return -ENOMEM;
+
+ /* align table to PAGE_SIZE boundary */
+ trans_pcie->aligned_ict_tbl_dma =
+ ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE);
+
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma,
+ (unsigned long long)trans_pcie->aligned_ict_tbl_dma,
+ (int)(trans_pcie->aligned_ict_tbl_dma -
+ trans_pcie->ict_tbl_dma));
+
+ trans_pcie->ict_tbl = trans_pcie->ict_tbl_vir +
+ (trans_pcie->aligned_ict_tbl_dma -
+ trans_pcie->ict_tbl_dma);
+
+ IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n",
+ trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir,
+ (int)(trans_pcie->aligned_ict_tbl_dma -
+ trans_pcie->ict_tbl_dma));
+
+ /* reset table and index to all 0 */
+ memset(trans_pcie->ict_tbl_vir, 0,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+ trans_pcie->ict_index = 0;
+
+ /* add periodic RX interrupt */
+ trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
+ return 0;
+}
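The over-allocate-and-align trick used for the ICT table can be sketched in plain user-space C as follows; PAGE_SIZE and ALIGN are redefined locally and calloc stands in for dma_alloc_coherent, so this is illustrative only:

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

struct ict_table {
	void *raw;         /* what was actually allocated (and must be freed) */
	uint32_t *aligned; /* PAGE_SIZE-aligned view handed to the device */
};

static int ict_table_alloc(struct ict_table *t, size_t entries)
{
	/* allocate one extra page so an aligned block always fits inside */
	t->raw = calloc(1, entries * sizeof(uint32_t) + PAGE_SIZE);
	if (!t->raw)
		return -1;
	t->aligned = (uint32_t *)ALIGN((uintptr_t)t->raw, PAGE_SIZE);
	return 0;
}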
+
+/* The device is going up: inform it that we are using the ICT interrupt
+ * table, and tell the driver to start using ICT interrupts.
+ */
+int iwl_reset_ict(struct iwl_trans *trans)
+{
+ u32 val;
+ unsigned long flags;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->ict_tbl_vir)
+ return 0;
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwl_disable_interrupts(trans);
+
+ memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+
+ val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT;
+
+ val |= CSR_DRAM_INT_TBL_ENABLE;
+ val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+
+ IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X "
+ "aligned dma address %Lx\n",
+ val,
+ (unsigned long long)trans_pcie->aligned_ict_tbl_dma);
+
+ iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
+ trans_pcie->use_ict = true;
+ trans_pcie->ict_index = 0;
+ iwl_write32(bus(trans), CSR_INT, trans_pcie->inta_mask);
+ iwl_enable_interrupts(trans);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ return 0;
+}
+
+/* The device is going down: disable ICT interrupt usage */
+void iwl_disable_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ trans_pcie->use_ict = false;
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+}
+
+static irqreturn_t iwl_isr(int irq, void *data)
+{
+ struct iwl_trans *trans = data;
+ struct iwl_trans_pcie *trans_pcie;
+ u32 inta, inta_mask;
+ unsigned long flags;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_fh;
+#endif
+ if (!trans)
+ return IRQ_NONE;
+
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(bus(trans), CSR_INT);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
+ inta_fh = iwl_read32(bus(trans), CSR_FH_INT_STATUS);
+ IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
+ "fh 0x%08x\n", inta, inta_mask, inta_fh);
+ }
+#endif
+
+ trans_pcie->inta |= inta;
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&trans_pcie->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ !trans_pcie->inta)
+ iwl_enable_interrupts(trans);
+
+ unplugged:
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+ /* re-enable interrupts here since we don't have anything to service. */
+ /* only re-enable if they were disabled by the irq and no tasklet was scheduled. */
+ if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ !trans_pcie->inta)
+ iwl_enable_interrupts(trans);
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ return IRQ_NONE;
+}
+
+/* Interrupt handler using the ICT table. With this interrupt, the driver
+ * stops using the INTA register to read the device's interrupts, since
+ * reading that register is expensive. Instead, the device writes interrupts
+ * into the ICT DRAM table, increments the index and fires an interrupt to
+ * the driver. The driver ORs all ICT table entries from the current index
+ * up to the first zero entry; the result is the interrupt set we need to
+ * service. The driver then sets the entries back to 0 and updates the index.
+ */
+irqreturn_t iwl_isr_ict(int irq, void *data)
+{
+ struct iwl_trans *trans = data;
+ struct iwl_trans_pcie *trans_pcie;
+ u32 inta, inta_mask;
+ u32 val = 0;
+ unsigned long flags;
+
+ if (!trans)
+ return IRQ_NONE;
+
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* DRAM interrupt table not set yet;
+ * use legacy interrupt.
+ */
+ if (!trans_pcie->use_ict)
+ return iwl_isr(irq, data);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here.
+ */
+ inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ /* read all non-zero entries, starting at ict_index */
+ while (trans_pcie->ict_tbl[trans_pcie->ict_index]) {
+
+ val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+ trans_pcie->ict_index,
+ le32_to_cpu(
+ trans_pcie->ict_tbl[trans_pcie->ict_index]));
+ trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+ trans_pcie->ict_index =
+ iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+
+ }
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 is set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
+ inta, inta_mask, val);
+
+ inta &= trans_pcie->inta_mask;
+ trans_pcie->inta |= inta;
+
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&trans_pcie->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ !trans_pcie->inta) {
+ /* Re-enable interrupts only if they were disabled by this handler
+ * and no tasklet was scheduled; if a tasklet was scheduled, it will
+ * re-enable them itself.
+ */
+ iwl_enable_interrupts(trans);
+ }
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+ /* re-enable interrupts here since we don't have anything to service.
+ * only re-enable if they were disabled by the irq.
+ */
+ if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ !trans_pcie->inta)
+ iwl_enable_interrupts(trans);
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ return IRQ_NONE;
+}
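A compressed sketch of how iwl_isr_ict() folds the ICT entries back into a CSR_INT-style word, including the bit-15 workaround described above; the table size and helper name are illustrative:

#include <stdint.h>

#define ICT_ENTRIES 1024u  /* illustrative: PAGE_SIZE / sizeof(u32) */

static uint32_t ict_collect(uint32_t *tbl, uint32_t *index)
{
	uint32_t val = 0, inta;

	/* OR all entries from the current index up to the first zero entry,
	 * clearing them as we go and advancing the index with wrap-around */
	while (tbl[*index]) {
		val |= tbl[*index];
		tbl[*index] = 0;
		*index = (*index + 1) % ICT_ENTRIES;
	}

	/* bits 18/19 imply the Rx bit (15) even if the h/w dropped it */
	if (val & 0xC0000)
		val |= 0x8000;

	/* spread the compressed 16-bit value back into CSR_INT layout:
	 * low byte stays in place, high byte moves up to bits 24-31 */
	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}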
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 222d410c586..4a0c95302a7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -29,34 +29,42 @@
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <net/mac80211.h>
-#include "iwl-agn.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-trans-int-pcie.h"
+#include "iwl-agn-hw.h"
+#include "iwl-trans-pcie-int.h"
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
/**
* iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
u16 byte_cnt)
{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
int write_ptr = txq->q.write_ptr;
int txq_id = txq->q.id;
u8 sec_ctl = 0;
u8 sta_id = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
+ struct iwl_tx_cmd *tx_cmd =
+ (struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;
+
+ scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
- sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
- sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+ sta_id = tx_cmd->sta_id;
+ sec_ctl = tx_cmd->sec_ctl;
switch (sec_ctl & TX_CMD_SEC_MSK) {
case TX_CMD_SEC_CCM:
@@ -82,7 +90,7 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
/**
* iwl_txq_update_write_ptr - Send new write index to hardware
*/
-void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
u32 reg = 0;
int txq_id = txq->q.id;
@@ -90,28 +98,28 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
if (txq->need_update == 0)
return;
- if (priv->cfg->base_params->shadow_reg_enable) {
+ if (hw_params(trans).shadow_reg_enable) {
/* shadow register enabled */
- iwl_write32(priv, HBUS_TARG_WRPTR,
+ iwl_write32(bus(trans), HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
} else {
/* if we're trying to save power */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
/* wake up nic if it's powered down ...
* uCode will wake up, and interrupt us again, so next
* time we'll skip this part. */
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+ reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
+ IWL_DEBUG_INFO(trans,
"Tx queue %d requesting wakeup,"
" GP1 = 0x%x\n", txq_id, reg);
- iwl_set_bit(priv, CSR_GP_CNTRL,
+ iwl_set_bit(bus(trans), CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
return;
}
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+ iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
/*
@@ -120,7 +128,7 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
* trying to tx (during RFKILL, we're not trying to tx).
*/
} else
- iwl_write32(priv, HBUS_TARG_WRPTR,
+ iwl_write32(bus(trans), HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
}
txq->need_update = 0;
@@ -165,7 +173,7 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
return tfd->num_tbs & 0x1f;
}
-static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
+static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
int i;
@@ -175,56 +183,59 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
num_tbs = iwl_tfd_get_num_tbs(tfd);
if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
/* @todo issue fatal error, it is quite serious situation */
return;
}
/* Unmap tx_cmd */
if (num_tbs)
- dma_unmap_single(priv->bus->dev,
+ dma_unmap_single(bus(trans)->dev,
dma_unmap_addr(meta, mapping),
dma_unmap_len(meta, len),
DMA_BIDIRECTIONAL);
/* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++)
- dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i),
+ dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
/**
* iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
+ * @trans - transport private data
* @txq - tx queue
* @index - the index of the TFD to be freed
+ * @dma_dir - the direction of the DMA mapping
*
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int index)
+void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ int index, enum dma_data_direction dma_dir)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
- iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
- DMA_TO_DEVICE);
+ iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);
/* free SKB */
- if (txq->txb) {
+ if (txq->skbs) {
struct sk_buff *skb;
- skb = txq->txb[index].skb;
+ skb = txq->skbs[index];
- /* can be called from irqs-disabled context */
+ /* Can be called from an irqs-disabled context.
+ * If the skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb.
+ */
if (skb) {
- dev_kfree_skb_any(skb);
- txq->txb[index].skb = NULL;
+ iwl_free_skb(priv(trans), skb);
+ txq->skbs[index] = NULL;
}
}
}
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len,
u8 reset)
@@ -244,7 +255,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
/* Each TFD can point to a maximum 20 Tx buffers */
if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Error can not send more than %d chunks\n",
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
IWL_NUM_OF_TBS);
return -EINVAL;
}
@@ -253,7 +264,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
return -EINVAL;
if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(priv, "Unaligned address = %llx\n",
+ IWL_ERR(trans, "Unaligned address = %llx\n",
(unsigned long long)addr);
iwl_tfd_set_tb(tfd, num_tbs, addr, len);
@@ -302,8 +313,7 @@ int iwl_queue_space(const struct iwl_queue *q)
/**
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
- int count, int slots_num, u32 id)
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
q->n_bd = count;
q->n_window = slots_num;
@@ -332,25 +342,23 @@ int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
return 0;
}
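For readers unfamiliar with the queue bookkeeping, a generic circular-ring sketch of the read/write-pointer model these helpers assume; the driver's actual wrap and high/low-water arithmetic differs in detail:

#include <stdbool.h>

struct ring {
	int n_bd;      /* number of buffer descriptors */
	int read_ptr;  /* oldest in-flight entry */
	int write_ptr; /* next entry to be filled */
};

static int ring_inc_wrap(int idx, int n_bd)
{
	return ++idx == n_bd ? 0 : idx;
}

/* true if slot i currently holds an in-flight entry */
static bool ring_used(const struct ring *q, int i)
{
	if (q->write_ptr >= q->read_ptr)
		return i >= q->read_ptr && i < q->write_ptr;
	return i < q->write_ptr || i >= q->read_ptr;
}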
-/*TODO: this functions should NOT be exported from trans module - export it
- * until the reclaim flow will be brought to the transport module too.
- * Add a declaration to make sparse happy */
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_tx_queue *txq)
{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
int txq_id = txq->q.id;
int read_ptr = txq->q.read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
+ struct iwl_tx_cmd *tx_cmd =
+ (struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != priv->cmd_queue)
- sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
+ if (txq_id != trans->shrd->cmd_queue)
+ sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
@@ -360,56 +368,61 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
-static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
u16 txq_id)
{
u32 tbl_dw_addr;
u32 tbl_dw;
u16 scd_q2ratid;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
- tbl_dw_addr = priv->scd_base_addr +
+ tbl_dw_addr = trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
- tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
+ tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);
if (txq_id & 0x1)
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
else
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
- iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+ iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);
return 0;
}
-static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
+static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
/* Simply stop the queue, but don't change any configuration;
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_write_prph(priv,
+ iwl_write_prph(bus(trans),
SCD_QUEUE_STATUS_BITS(txq_id),
(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
-void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
+void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
int txq_id, u32 index)
{
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+ iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
(index & 0xff) | (txq_id << 8));
- iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index);
+ iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}
-void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
+void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
int tx_fifo_id, int scd_retry)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id = txq->q.id;
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
+ int active =
+ test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
- iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id),
+ iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
@@ -417,55 +430,75 @@ void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
txq->sched_retry = scd_retry;
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
+ IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
active ? "Activate" : "Deactivate",
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
-void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
- int frame_limit)
+static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
+ u8 ctx, u16 tid)
+{
+ const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return ac_to_fifo[tid_to_ac[tid]];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid, int frame_limit)
{
int tx_fifo, txq_id, ssn_idx;
u16 ra_tid;
unsigned long flags;
struct iwl_tid_data *tid_data;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
if (WARN_ON(sta_id == IWL_INVALID_STATION))
return;
- if (WARN_ON(tid >= MAX_TID_COUNT))
+ if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
return;
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
+ tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
+ if (WARN_ON(tx_fifo < 0)) {
+ IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
+ return;
+ }
+
+ spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
ssn_idx = SEQ_TO_SN(tid_data->seq_number);
txq_id = tid_data->agg.txq_id;
- tx_fifo = tid_data->agg.tx_fifo;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
ra_tid = BUILD_RAxTID(sta_id, tid);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&trans->shrd->lock, flags);
/* Stop this Tx queue before configuring it */
- iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+ iwlagn_tx_queue_stop_scheduler(trans, txq_id);
/* Map receiver-address / traffic-ID to this queue */
- iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+ iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
/* Set this queue as a chain-building queue */
- iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));
+ iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));
/* enable aggregations for the queue */
- iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id));
+ iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
+ trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
/* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
+ iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
sizeof(u32),
((frame_limit <<
@@ -475,40 +508,161 @@ void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
+ tx_fifo, 1);
+
+ trans_pcie->txq[txq_id].sta_id = sta_id;
+ trans_pcie->txq[txq_id].tid = tid;
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+}
- spin_unlock_irqrestore(&priv->lock, flags);
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id,
+ &trans_pcie->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
}
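The claim-lowest-free-queue loop above can be modelled in standalone C with C11 atomics standing in for the kernel's test_and_set_bit; a sketch only, assuming the queue count fits in one unsigned long:

#include <stdatomic.h>

static int claim_free_txq(atomic_ulong *active_msk, int max_txq_num)
{
	for (int txq_id = 0; txq_id < max_txq_num; txq_id++) {
		unsigned long bit = 1ul << txq_id;
		/* fetch_or returns the old mask; if our bit was clear,
		 * we are the one who just claimed this queue */
		if (!(atomic_fetch_or(active_msk, bit) & bit))
			return txq_id;
	}
	return -1; /* no free aggregation queue */
}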
-int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid, u16 *ssn)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tid_data *tid_data;
+ unsigned long flags;
+ int txq_id;
+
+ txq_id = iwlagn_txq_ctx_activate_free(trans);
+ if (txq_id == -1) {
+ IWL_ERR(trans, "No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
+
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(trans, "HW queue is empty\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
+ } else {
+ IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
+ "queue\n", tid_data->tfds_in_queue);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+
+ return 0;
+}
+
+void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+
+ iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
+
+ trans_pcie->txq[txq_id].q.read_ptr = 0;
+ trans_pcie->txq[txq_id].q.write_ptr = 0;
+ /* assumes that ssn_idx is valid (!= 0xFFF) */
+ iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+
+ iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(trans_pcie, txq_id);
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
+}
+
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+ int read_ptr, write_ptr;
+ struct iwl_tid_data *tid_data;
+ int txq_id;
+
+ spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
+ txq_id = tid_data->agg.txq_id;
+
if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
(IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_ERR(priv,
+ hw_params(trans).num_ampdu_queues <= txq_id)) {
+ IWL_ERR(trans,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
+ hw_params(trans).num_ampdu_queues - 1);
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
return -EINVAL;
}
- iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+ switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
+ goto turn_off;
+ case IWL_AGG_ON:
+ break;
+ default:
+ IWL_WARN(trans, "Stopping AGG while state not ON "
+ "or starting for %d on %d (%d)\n", sta_id, tid,
+ trans->shrd->tid_data[sta_id][tid].agg.state);
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+ return 0;
+ }
- iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id));
+ write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
+ read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
+ trans->shrd->tid_data[sta_id][tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+ return 0;
+ }
+
+ IWL_DEBUG_HT(trans, "HW queue is empty\n");
+turn_off:
+ trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&trans->shrd->sta_lock);
+ spin_lock(&trans->shrd->lock);
- iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+ iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
return 0;
}
@@ -524,9 +678,10 @@ int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
 * failed. On success, it returns the index (> 0) of the command in the
* command queue.
*/
-static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
@@ -544,14 +699,14 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
int trace_idx;
#endif
- if (test_bit(STATUS_FW_ERROR, &priv->status)) {
- IWL_WARN(priv, "fw recovery, no hcmd send\n");
+ if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+ IWL_WARN(trans, "fw recovery, no hcmd send\n");
return -EIO;
}
- if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
+ if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
!(cmd->flags & CMD_ON_DEMAND)) {
- IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
+ IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
return -EIO;
}
@@ -584,22 +739,22 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
return -EINVAL;
- if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
- IWL_WARN(priv, "Not sending command - %s KILL\n",
- iwl_is_rfkill(priv) ? "RF" : "CT");
+ if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
+ IWL_WARN(trans, "Not sending command - %s KILL\n",
+ iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
return -EIO;
}
- spin_lock_irqsave(&priv->hcmd_lock, flags);
+ spin_lock_irqsave(&trans->hcmd_lock, flags);
if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+ spin_unlock_irqrestore(&trans->hcmd_lock, flags);
- IWL_ERR(priv, "No space in command queue\n");
- is_ct_kill = iwl_check_for_ct_kill(priv);
+ IWL_ERR(trans, "No space in command queue\n");
+ is_ct_kill = iwl_check_for_ct_kill(priv(trans));
if (!is_ct_kill) {
- IWL_ERR(priv, "Restarting adapter due to queue full\n");
- iwlagn_fw_error(priv, false);
+ IWL_ERR(trans, "Restarting adapter queue is full\n");
+ iwlagn_fw_error(priv(trans), false);
}
return -ENOSPC;
}
@@ -611,19 +766,18 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
- if (cmd->flags & CMD_ASYNC)
- out_meta->callback = cmd->callback;
/* set up the header */
out_cmd->hdr.cmd = cmd->id;
out_cmd->hdr.flags = 0;
- out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ out_cmd->hdr.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
+ INDEX_TO_SEQ(q->write_ptr));
/* and copy the data that needs to be copied */
- cmd_dest = &out_cmd->cmd.payload[0];
+ cmd_dest = out_cmd->payload;
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
continue;
@@ -633,16 +787,16 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
cmd_dest += cmd->len[i];
}
- IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+ IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
"%d bytes at %d[%d]:%d\n",
get_cmd_string(out_cmd->hdr.cmd),
out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
- q->write_ptr, idx, priv->cmd_queue);
+ q->write_ptr, idx, trans->shrd->cmd_queue);
- phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
+ phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
+ if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
idx = -ENOMEM;
goto out;
}
@@ -650,7 +804,8 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
dma_unmap_addr_set(out_meta, mapping, phys_addr);
dma_unmap_len_set(out_meta, len, copy_size);
- iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
+ iwlagn_txq_attach_buf_to_tfd(trans, txq,
+ phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
trace_bufs[0] = &out_cmd->hdr;
trace_lens[0] = copy_size;
@@ -662,17 +817,18 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
continue;
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
continue;
- phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
+ phys_addr = dma_map_single(bus(trans)->dev,
+ (void *)cmd->data[i],
cmd->len[i], DMA_BIDIRECTIONAL);
- if (dma_mapping_error(priv->bus->dev, phys_addr)) {
- iwlagn_unmap_tfd(priv, out_meta,
+ if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
+ iwlagn_unmap_tfd(trans, out_meta,
&txq->tfds[q->write_ptr],
DMA_BIDIRECTIONAL);
idx = -ENOMEM;
goto out;
}
- iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
trace_bufs[trace_idx] = cmd->data[i];
@@ -688,7 +844,7 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* check that tracing gets all possible blocks */
BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- trace_iwlwifi_dev_hcmd(priv, cmd->flags,
+ trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
trace_bufs[0], trace_lens[0],
trace_bufs[1], trace_lens[1],
trace_bufs[2], trace_lens[2]);
@@ -696,10 +852,10 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
+ iwl_txq_update_write_ptr(trans, txq);
out:
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+ spin_unlock_irqrestore(&trans->hcmd_lock, flags);
return idx;
}
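A small sketch of the sequence-field idea used when enqueuing the command above: the queue number and write index are packed into hdr.sequence so the response can later be routed back to the right queue slot. The exact field widths below are assumptions for illustration:

#include <stdint.h>

static uint16_t make_sequence(unsigned int queue, unsigned int index)
{
	/* queue number in bits 8..12, slot index in bits 0..7 (assumed widths) */
	return (uint16_t)(((queue & 0x1f) << 8) | (index & 0xff));
}

static void split_sequence(uint16_t seq, unsigned int *queue,
			   unsigned int *index)
{
	*queue = (seq >> 8) & 0x1f;
	*index = seq & 0xff;
}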
@@ -710,14 +866,16 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 * need to be reclaimed. As a result, some free space forms. If there is
* enough free space (> low mark), wake the stack that feeds us.
*/
-static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
+static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
+ int idx)
{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
int nfreed = 0;
if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
- IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
+ IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
"index %d is out of range [0-%d] %d %d.\n", __func__,
txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
return;
@@ -727,9 +885,9 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
if (nfreed++ > 0) {
- IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
+ IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
q->write_ptr, q->read_ptr);
- iwlagn_fw_error(priv, false);
+ iwlagn_fw_error(priv(trans), false);
}
}
@@ -738,12 +896,15 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
/**
* iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
* @rxb: Rx buffer to reclaim
+ * @handler_status: return value of the handler of the command
+ * (put in setup_rx_handlers)
*
* If an Rx buffer has an async callback associated with it the callback
* will be executed. The attached skb (if present) will only be freed
* if the callback returns 1
*/
-void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
+ int handler_status)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -752,18 +913,19 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
int cmd_index;
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
unsigned long flags;
/* If a Tx command is being handled and it isn't in the actual
 * command queue, then a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != priv->cmd_queue,
+ if (WARN(txq_id != trans->shrd->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, priv->cmd_queue, sequence,
- priv->txq[priv->cmd_queue].q.read_ptr,
- priv->txq[priv->cmd_queue].q.write_ptr)) {
- iwl_print_hex_error(priv, pkt, 32);
+ txq_id, trans->shrd->cmd_queue, sequence,
+ trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
+ trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
+ iwl_print_hex_error(trans, pkt, 32);
return;
}
@@ -773,140 +935,40 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
txq->time_stamp = jiffies;
- iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+ iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
+ DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+ meta->source->handler_status = handler_status;
rxb->page = NULL;
- } else if (meta->callback)
- meta->callback(priv, cmd, pkt);
+ }
- spin_lock_irqsave(&priv->hcmd_lock, flags);
+ spin_lock_irqsave(&trans->hcmd_lock, flags);
- iwl_hcmd_queue_reclaim(priv, txq_id, index);
+ iwl_hcmd_queue_reclaim(trans, txq_id, index);
if (!(meta->flags & CMD_ASYNC)) {
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
+ if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
+ IWL_WARN(trans,
+ "HCMD_ACTIVE already clear for command %s\n",
+ get_cmd_string(cmd->hdr.cmd));
+ }
+ clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->hdr.cmd));
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&trans->shrd->wait_command_queue);
}
meta->flags = 0;
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-}
-
-const char *get_cmd_string(u8 cmd)
-{
- switch (cmd) {
- IWL_CMD(REPLY_ALIVE);
- IWL_CMD(REPLY_ERROR);
- IWL_CMD(REPLY_RXON);
- IWL_CMD(REPLY_RXON_ASSOC);
- IWL_CMD(REPLY_QOS_PARAM);
- IWL_CMD(REPLY_RXON_TIMING);
- IWL_CMD(REPLY_ADD_STA);
- IWL_CMD(REPLY_REMOVE_STA);
- IWL_CMD(REPLY_REMOVE_ALL_STA);
- IWL_CMD(REPLY_TXFIFO_FLUSH);
- IWL_CMD(REPLY_WEPKEY);
- IWL_CMD(REPLY_TX);
- IWL_CMD(REPLY_LEDS_CMD);
- IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
- IWL_CMD(COEX_PRIORITY_TABLE_CMD);
- IWL_CMD(COEX_MEDIUM_NOTIFICATION);
- IWL_CMD(COEX_EVENT_CMD);
- IWL_CMD(REPLY_QUIET_CMD);
- IWL_CMD(REPLY_CHANNEL_SWITCH);
- IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
- IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
- IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
- IWL_CMD(POWER_TABLE_CMD);
- IWL_CMD(PM_SLEEP_NOTIFICATION);
- IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
- IWL_CMD(REPLY_SCAN_CMD);
- IWL_CMD(REPLY_SCAN_ABORT_CMD);
- IWL_CMD(SCAN_START_NOTIFICATION);
- IWL_CMD(SCAN_RESULTS_NOTIFICATION);
- IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
- IWL_CMD(BEACON_NOTIFICATION);
- IWL_CMD(REPLY_TX_BEACON);
- IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
- IWL_CMD(QUIET_NOTIFICATION);
- IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
- IWL_CMD(MEASURE_ABORT_NOTIFICATION);
- IWL_CMD(REPLY_BT_CONFIG);
- IWL_CMD(REPLY_STATISTICS_CMD);
- IWL_CMD(STATISTICS_NOTIFICATION);
- IWL_CMD(REPLY_CARD_STATE_CMD);
- IWL_CMD(CARD_STATE_NOTIFICATION);
- IWL_CMD(MISSED_BEACONS_NOTIFICATION);
- IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
- IWL_CMD(SENSITIVITY_CMD);
- IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
- IWL_CMD(REPLY_RX_PHY_CMD);
- IWL_CMD(REPLY_RX_MPDU_CMD);
- IWL_CMD(REPLY_RX);
- IWL_CMD(REPLY_COMPRESSED_BA);
- IWL_CMD(CALIBRATION_CFG_CMD);
- IWL_CMD(CALIBRATION_RES_NOTIFICATION);
- IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
- IWL_CMD(REPLY_TX_POWER_DBM_CMD);
- IWL_CMD(TEMPERATURE_NOTIFICATION);
- IWL_CMD(TX_ANT_CONFIGURATION_CMD);
- IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
- IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
- IWL_CMD(REPLY_BT_COEX_PROT_ENV);
- IWL_CMD(REPLY_WIPAN_PARAMS);
- IWL_CMD(REPLY_WIPAN_RXON);
- IWL_CMD(REPLY_WIPAN_RXON_TIMING);
- IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
- IWL_CMD(REPLY_WIPAN_QOS_PARAM);
- IWL_CMD(REPLY_WIPAN_WEPKEY);
- IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
- IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
- IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
- IWL_CMD(REPLY_WOWLAN_PATTERNS);
- IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
- IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
- IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
- IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
- IWL_CMD(REPLY_WOWLAN_GET_STATUS);
- default:
- return "UNKNOWN";
-
- }
+ spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-static void iwl_generic_cmd_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
- get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- switch (cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
- get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- break;
- default:
- IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
- get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- }
-#endif
-}
-
-static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
int ret;
@@ -914,81 +976,83 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
if (WARN_ON(cmd->flags & CMD_WANT_SKB))
return -EINVAL;
- /* Assign a generic callback if one is not provided */
- if (!cmd->callback)
- cmd->callback = iwl_generic_cmd_callback;
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
return -EBUSY;
- ret = iwl_enqueue_hcmd(priv, cmd);
+ ret = iwl_enqueue_hcmd(trans, cmd);
if (ret < 0) {
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
return 0;
}
-static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cmd_idx;
int ret;
- lockdep_assert_held(&priv->mutex);
+ lockdep_assert_held(&trans->shrd->mutex);
- /* A synchronous command can not have a callback set. */
- if (WARN_ON(cmd->callback))
- return -EINVAL;
-
- IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
+ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(cmd->id));
- set_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
+ set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
- cmd_idx = iwl_enqueue_hcmd(priv, cmd);
+ cmd_idx = iwl_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+ clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
+ ret = wait_event_timeout(trans->shrd->wait_command_queue,
+ !test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
- IWL_ERR(priv,
+ if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
+ struct iwl_tx_queue *txq =
+ &trans_pcie->txq[trans->shrd->cmd_queue];
+ struct iwl_queue *q = &txq->q;
+
+ IWL_ERR(trans,
"Error sending %s: time out after %dms.\n",
get_cmd_string(cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command"
+ IWL_ERR(trans,
+ "Current CMD queue read_ptr %d write_ptr %d\n",
+ q->read_ptr, q->write_ptr);
+
+ clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
"%s\n", get_cmd_string(cmd->id));
ret = -ETIMEDOUT;
goto cancel;
}
}
- if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
- IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
+ if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
+ IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
get_cmd_string(cmd->id));
ret = -ECANCELED;
goto fail;
}
- if (test_bit(STATUS_FW_ERROR, &priv->status)) {
- IWL_ERR(priv, "Command %s failed: FW Error\n",
+ if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+ IWL_ERR(trans, "Command %s failed: FW Error\n",
get_cmd_string(cmd->id));
ret = -EIO;
goto fail;
}
if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
- IWL_ERR(priv, "Error: Response NULL in '%s'\n",
+ IWL_ERR(trans, "Error: Response NULL in '%s'\n",
get_cmd_string(cmd->id));
ret = -EIO;
goto cancel;
@@ -1004,35 +1068,74 @@ cancel:
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
- priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
+ trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
fail:
if (cmd->reply_page) {
- iwl_free_pages(priv, cmd->reply_page);
+ iwl_free_pages(trans->shrd, cmd->reply_page);
cmd->reply_page = 0;
}
return ret;
}
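The synchronous-command flow above (mark active, enqueue, sleep until completion or timeout) reduces to the following user-space sketch, with pthreads in place of the kernel wait queue; names are illustrative and the enqueue step is elided:

#include <pthread.h>
#include <stdbool.h>
#include <time.h>
#include <errno.h>

struct cmd_waiter {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	bool            hcmd_active;
};

/* called by the completion path (the equivalent of iwl_tx_cmd_complete) */
static void cmd_complete(struct cmd_waiter *w)
{
	pthread_mutex_lock(&w->lock);
	w->hcmd_active = false;
	pthread_cond_broadcast(&w->done);
	pthread_mutex_unlock(&w->lock);
}

static int send_cmd_sync(struct cmd_waiter *w, int timeout_sec)
{
	struct timespec ts;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&w->lock);
	w->hcmd_active = true;
	/* ... enqueue the command here ... */
	while (w->hcmd_active && ret == 0)
		ret = pthread_cond_timedwait(&w->done, &w->lock, &ts);
	if (ret == ETIMEDOUT)
		w->hcmd_active = false;	/* give up, as the driver does */
	pthread_mutex_unlock(&w->lock);
	return ret == ETIMEDOUT ? -ETIMEDOUT : 0;
}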
-int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
if (cmd->flags & CMD_ASYNC)
- return iwl_send_cmd_async(priv, cmd);
+ return iwl_send_cmd_async(trans, cmd);
- return iwl_send_cmd_sync(priv, cmd);
+ return iwl_send_cmd_sync(trans, cmd);
}
-int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
- const void *data)
+/* Frees buffers up to, but not including, index */
+int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+ struct sk_buff_head *skbs)
{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = { len, },
- .data = { data, },
- .flags = flags,
- };
-
- return iwl_send_cmd(priv, &cmd);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ int last_to_free;
+ int freed = 0;
+
+ /* This function is not meant to release the cmd queue */
+ if (WARN_ON(txq_id == trans->shrd->cmd_queue))
+ return 0;
+
+ /* Since we free up to, but not including, index, the one before index
+ * is the last we will free. That one must be in use. */
+ last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
+
+ if ((index >= q->n_bd) ||
+ (iwl_queue_used(q, last_to_free) == 0)) {
+ IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
+ "last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
+ IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
+ q->read_ptr, index);
+
+ if (WARN_ON(!skb_queue_empty(skbs)))
+ return 0;
+
+ for (;
+ q->read_ptr != index;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
+ continue;
+
+ __skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);
+
+ txq->skbs[txq->q.read_ptr] = NULL;
+
+ iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
+
+ iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
+ freed++;
+ }
+ return freed;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
new file mode 100644
index 00000000000..8e8c75c997e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -0,0 +1,1998 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+
+#include "iwl-trans.h"
+#include "iwl-trans-pcie-int.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-shared.h"
+#include "iwl-eeprom.h"
+#include "iwl-agn-hw.h"
+
+static int iwl_trans_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct device *dev = bus(trans)->dev;
+
+ memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+
+ spin_lock_init(&rxq->lock);
+
+ if (WARN_ON(rxq->bd || rxq->rb_stts))
+ return -EINVAL;
+
+ /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+ memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
+
+	/* Allocate the driver's pointer to the receive buffer status */
+ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb_stts;
+ memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+
+ return 0;
+
+err_rb_stts:
+ dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+err_bd:
+ return -ENOMEM;
+}
+
+static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ int i;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rxq->pool[i].page,
+ hw_params(trans).rx_page_order);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+}
+
+static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
+ struct iwl_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
+
+ if (iwlagn_mod_params.amsdu_size_8K)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+ * the credit mechanism in 5000 HW RX FIFO
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size|
+ (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+}
+
+static int iwl_rx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+
+ int i, err;
+ unsigned long flags;
+
+ if (!rxq->bd) {
+ err = iwl_trans_rx_alloc(trans);
+ if (err)
+ return err;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ iwl_trans_rxq_free_rx_bufs(trans);
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+	/* Set up so that we have processed and used all buffers, but have
+	 * not yet restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ iwlagn_rx_replenish(trans);
+
+ iwl_trans_rx_hw_init(trans, rxq);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ rxq->need_update = 1;
+ iwl_rx_queue_update_write_ptr(trans, rxq);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ return 0;
+}
+
+static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+
+ unsigned long flags;
+
+	/* If rxq->bd is NULL, nothing has been allocated yet;
+	 * exit now */
+ if (!rxq->bd) {
+ IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ iwl_trans_rxq_free_rx_bufs(trans);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(bus(trans)->dev,
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ else
+ IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
+ memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+ rxq->rb_stts = NULL;
+}
+
+static int iwl_trans_rx_stop(struct iwl_trans *trans)
+{
+
+ /* stop Rx DMA */
+ iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
+
+static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ if (WARN_ON(ptr->addr))
+ return -EINVAL;
+
+ ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
+ &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+static int iwl_trans_txq_alloc(struct iwl_trans *trans,
+ struct iwl_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ int i;
+
+ if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
+ return -EINVAL;
+
+ txq->q.n_window = slots_num;
+
+ txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
+ txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto error;
+
+ if (txq_id == trans->shrd->cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->cmd[i])
+ goto error;
+ }
+
+ /* Alloc driver data array and TFD circular buffer */
+ /* Driver private data, only for Tx (not command) queues,
+ * not shared with device. */
+ if (txq_id != trans->shrd->cmd_queue) {
+ txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
+ GFP_KERNEL);
+ if (!txq->skbs) {
+ IWL_ERR(trans, "kmalloc for auxiliary BD "
+ "structures failed\n");
+ goto error;
+ }
+ } else {
+ txq->skbs = NULL;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
+ &txq->q.dma_addr, GFP_KERNEL);
+ if (!txq->tfds) {
+ IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = txq_id;
+
+ return 0;
+error:
+ kfree(txq->skbs);
+ txq->skbs = NULL;
+	/* Since txq->cmd has been zeroed (kcalloc),
+	 * all non-allocated cmd[i] will be NULL */
+ if (txq->cmd && txq_id == trans->shrd->cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->cmd[i]);
+ kfree(txq->meta);
+ kfree(txq->cmd);
+ txq->meta = NULL;
+ txq->cmd = NULL;
+
+ return -ENOMEM;
+
+}
+
+static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ int ret;
+
+ txq->need_update = 0;
+ memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
+
+ /*
+ * For the default queues 0-3, set up the swq_id
+ * already -- all others need to get one later
+ * (if they need one at all).
+ */
+ if (txq_id < 4)
+ iwl_set_swq_id(txq, txq_id, txq_id);
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
+ txq_id);
+ if (ret)
+ return ret;
+
+ /*
+	 * Tell the NIC where to find the circular buffer of Tx Frame
+	 * Descriptors for the given Tx queue, and enable the DMA channel
+	 * used for that queue.
+	 * Circular buffer (TFD queue in DRAM) physical base address:
+	 */
+ iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+
+ return 0;
+}
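+
+/*
+ * For reference: the swq_id set in iwl_trans_txq_init() packs the software
+ * queue (AC) into the low two bits and the HW queue above it, matching the
+ * decode used by the tx_queue debugfs dump later in this file, roughly:
+ *
+ *	swq_id = (hwq << 2) | ac;	(illustrative; defined elsewhere)
+ */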
+
+/**
+ * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ enum dma_data_direction dma_dir;
+ unsigned long flags;
+
+ if (!q->n_bd)
+ return;
+
+ /* In the command queue, all the TBs are mapped as BIDI
+ * so unmap them as such.
+ */
+ if (txq_id == trans->shrd->cmd_queue)
+ dma_dir = DMA_BIDIRECTIONAL;
+ else
+ dma_dir = DMA_TO_DEVICE;
+
+ spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+ while (q->write_ptr != q->read_ptr) {
+		/* The read_ptr needs to be bounded by q->n_window */
+ iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
+ dma_dir);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+}
+
+/**
+ * iwl_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ struct device *dev = bus(trans)->dev;
+ int i;
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_tx_queue_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+
+ if (txq_id == trans->shrd->cmd_queue)
+ for (i = 0; i < txq->q.n_window; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd) {
+ dma_free_coherent(dev, sizeof(struct iwl_tfd) *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+ }
+
+ /* De-alloc array of per-TFD driver data */
+ kfree(txq->skbs);
+ txq->skbs = NULL;
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+
+/**
+ * iwl_trans_pcie_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
+{
+ int txq_id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* Tx queues */
+ if (trans_pcie->txq) {
+ for (txq_id = 0;
+ txq_id < hw_params(trans).max_txq_num; txq_id++)
+ iwl_tx_queue_free(trans, txq_id);
+ }
+
+ kfree(trans_pcie->txq);
+ trans_pcie->txq = NULL;
+
+ iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
+
+ iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
+}
+
+/**
+ * iwl_trans_tx_alloc - allocate TX context
+ *
+ * Allocate all Tx DMA structures and initialize them.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+static int iwl_trans_tx_alloc(struct iwl_trans *trans)
+{
+ int ret;
+ int txq_id, slots_num;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
+ sizeof(struct iwlagn_scd_bc_tbl);
+
+	/* It is not allowed to alloc twice, so warn when this happens.
+	 * We cannot rely on the previous allocation, so free and fail */
+ if (WARN_ON(trans_pcie->txq)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+ scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ goto error;
+ }
+
+ /* Alloc keep-warm buffer */
+ ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(trans, "Keep Warm allocation failed\n");
+ goto error;
+ }
+
+ trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
+ sizeof(struct iwl_tx_queue), GFP_KERNEL);
+ if (!trans_pcie->txq) {
+ IWL_ERR(trans, "Not enough memory for txq\n");
+		ret = -ENOMEM;
+ goto error;
+ }
+
+	/* Alloc all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
+ slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ iwl_trans_pcie_tx_free(trans);
+
+ return ret;
+}
+static int iwl_tx_init(struct iwl_trans *trans)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+ bool alloc = false;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->txq) {
+ ret = iwl_trans_tx_alloc(trans);
+ if (ret)
+ goto error;
+ alloc = true;
+ }
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ iwl_write_prph(bus(trans), SCD_TXFACT, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
+ slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+	/* Upon error, free only if we allocated something */
+ if (alloc)
+ iwl_trans_pcie_tx_free(trans);
+ return ret;
+}
+
+static void iwl_set_pwr_vmain(struct iwl_trans *trans)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+ if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
+ iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+ iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+static int iwl_nic_init(struct iwl_trans *trans)
+{
+ unsigned long flags;
+
+ /* nic_init */
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwl_apm_init(priv(trans));
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(bus(trans), CSR_INT_COALESCING,
+ IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ iwl_set_pwr_vmain(trans);
+
+ iwl_nic_config(priv(trans));
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ iwl_rx_init(trans);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (iwl_tx_init(trans))
+ return -ENOMEM;
+
+ if (hw_params(trans).shadow_reg_enable) {
+ /* enable shadow regs in HW */
+ iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
+ 0x800FFFFF);
+ }
+
+ set_bit(STATUS_INIT, &trans->shrd->status);
+
+ return 0;
+}
+
+#define HW_READY_TIMEOUT (50)
+
+/* Note: returns poll_bit return value, which is >= 0 on success */
+static int iwl_set_hw_ready(struct iwl_trans *trans)
+{
+ int ret;
+
+ iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ HW_READY_TIMEOUT);
+
+ IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
+ return ret;
+}
+
+/* Note: returns standard 0/-ERROR code */
+static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
+{
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+
+ ret = iwl_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+ if (ret < 0)
+ return ret;
+
+ /* HW should be ready by now, check again. */
+ ret = iwl_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
+ return ret;
+}
+
+#define IWL_AC_UNSET -1
+
+struct queue_to_fifo_ac {
+ s8 fifo, ac;
+};
+
+static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
+ { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
+ { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+};
+
+static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
+ { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
+ { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
+ { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_BE_IPAN, 2, },
+ { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
+};
+
+static const u8 iwlagn_bss_ac_to_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+};
+static const u8 iwlagn_bss_ac_to_queue[] = {
+ 0, 1, 2, 3,
+};
+static const u8 iwlagn_pan_ac_to_fifo[] = {
+ IWL_TX_FIFO_VO_IPAN,
+ IWL_TX_FIFO_VI_IPAN,
+ IWL_TX_FIFO_BE_IPAN,
+ IWL_TX_FIFO_BK_IPAN,
+};
+static const u8 iwlagn_pan_ac_to_queue[] = {
+ 7, 6, 5, 4,
+};
+
+static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
+{
+ int ret;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
+ trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
+ trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
+
+ trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
+ trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;
+
+ trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
+ trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
+
+ if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
+ iwl_trans_pcie_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ else
+ set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+
+ if (iwl_is_rfkill(trans->shrd)) {
+ iwl_set_hw_rfkill_state(priv(trans), true);
+ iwl_enable_interrupts(trans);
+ return -ERFKILL;
+ }
+
+ iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
+
+ ret = iwl_nic_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
+ iwl_enable_interrupts(trans);
+
+ /* really make sure rfkill handshake bits are cleared */
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ return 0;
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask.
+ * Must be called under priv->shrd->lock and with MAC access held.
+ */
+static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
+{
+ iwl_write_prph(bus(trans), SCD_TXFACT, mask);
+}
+
+static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
+{
+ const struct queue_to_fifo_ac *queue_to_fifo;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ trans_pcie->scd_base_addr =
+ iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
+ a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
+	/* reset context data memory */
+ for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(bus(trans), a, 0);
+ /* reset tx status memory */
+ for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(bus(trans), a, 0);
+ for (; a < trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
+ a += 4)
+ iwl_write_targ_mem(bus(trans), a, 0);
+
+ iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
+ trans_pcie->scd_bc_tbls.dma >> 10);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
+ iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
+ SCD_QUEUECHAIN_SEL_ALL(trans));
+ iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);
+
+ /* initiate the queues */
+ for (i = 0; i < hw_params(trans).max_txq_num; i++) {
+ iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
+ iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
+ iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(i), 0);
+ iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ ((SCD_WIN_SIZE <<
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((SCD_FRAME_LIMIT <<
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+ }
+
+ iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
+ IWL_MASK(0, hw_params(trans).max_txq_num));
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
+
+ /* map queues to FIFOs */
+ if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
+ queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
+ else
+ queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
+
+ iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
+
+	/* make sure all queues are not stopped */
+ memset(&trans_pcie->queue_stopped[0], 0,
+ sizeof(trans_pcie->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&trans_pcie->queue_stop_count[i], 0);
+
+	/* reset to 0 to enable all the queues first */
+ trans_pcie->txq_ctx_active_msk = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
+ IWLAGN_FIRST_AMPDU_QUEUE);
+ BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
+ IWLAGN_FIRST_AMPDU_QUEUE);
+
+ for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
+ int fifo = queue_to_fifo[i].fifo;
+ int ac = queue_to_fifo[i].ac;
+
+ iwl_txq_ctx_activate(trans_pcie, i);
+
+ if (fifo == IWL_TX_FIFO_UNUSED)
+ continue;
+
+ if (ac != IWL_AC_UNSET)
+ iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
+ fifo, 0);
+ }
+
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ /* Enable L1-Active */
+ iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+}
+
+/**
+ * iwl_trans_tx_stop - Stop all Tx DMA channels
+ */
+static int iwl_trans_tx_stop(struct iwl_trans *trans)
+{
+ int ch, txq_id;
+ unsigned long flags;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+
+ iwl_trans_txq_set_sched(trans, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+ iwl_write_direct32(bus(trans),
+ FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ 1000))
+ IWL_ERR(trans, "Failing on timeout while stopping"
+ " DMA channel %d [0x%08x]", ch,
+ iwl_read_direct32(bus(trans),
+ FH_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+ if (!trans_pcie->txq) {
+ IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
+ return 0;
+ }
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+ iwl_tx_queue_unmap(trans, txq_id);
+
+ return 0;
+}
+
+static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
+{
+ unsigned long flags;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwl_disable_interrupts(trans);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
+	/* wait to make sure we flush the pending tasklet */
+ synchronize_irq(bus(trans)->irq);
+ tasklet_kill(&trans_pcie->irq_tasklet);
+}
+
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+{
+ /* stop and reset the on-board processor */
+ iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ iwl_trans_pcie_disable_sync_irq(trans);
+
+ /* device going down, Stop using ICT table */
+ iwl_disable_ict(trans);
+
+ /*
+ * If a HW restart happens during firmware loading,
+ * then the firmware loading might call this function
+ * and later it might be called again due to the
+ * restart. So don't process again if the device is
+ * already dead.
+ */
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
+ iwl_trans_tx_stop(trans);
+ iwl_trans_rx_stop(trans);
+
+ /* Power-down device's busmaster DMA clocks */
+ iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+ }
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwl_apm_stop(priv(trans));
+}
+
+static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
+ u8 sta_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+
+ dma_addr_t phys_addr = 0;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u16 seq_number = 0;
+ u8 wait_write_ptr = 0;
+ u8 txq_id;
+ u8 tid = 0;
+ bool is_agg = false;
+ __le16 fc = hdr->frame_control;
+ u8 hdr_len = ieee80211_hdrlen(fc);
+
+ /*
+ * Send this frame after DTIM -- there's a special queue
+ * reserved for this for contexts that support AP mode.
+ */
+ if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ txq_id = trans_pcie->mcast_queue[ctx];
+
+ /*
+ * The microcode will clear the more data
+ * bit in the last frame it transmits.
+ */
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ txq_id = IWL_AUX_QUEUE;
+ else
+ txq_id =
+ trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
+
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ u8 *qc = NULL;
+ struct iwl_tid_data *tid_data;
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
+
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -1;
+
+ seq_number = tid_data->seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl = hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON);
+ txq_id = tid_data->agg.txq_id;
+ is_agg = true;
+ }
+ }
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ txq = &trans_pcie->txq[txq_id];
+ q = &txq->q;
+
+ /* Set up driver data for this TFD */
+ txq->skbs[q->write_ptr] = skb;
+ txq->cmd[q->write_ptr] = dev_cmd;
+
+ dev_cmd->hdr.cmd = REPLY_TX;
+ dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->meta[q->write_ptr];
+
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
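+
+	/*
+	 * Example: if the Tx command plus MAC header comes to 46 bytes,
+	 * firstlen is rounded up to 48 ((46 + 3) & ~3) and the two bytes
+	 * of padding are signalled via TX_CMD_FLG_MH_PAD_MSK above.
+	 */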
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = dma_map_single(bus(trans)->dev,
+ &dev_cmd->hdr, firstlen,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
+ return -1;
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+
+ if (!ieee80211_has_morefrags(fc)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
+ secondlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
+ dma_unmap_single(bus(trans)->dev,
+ dma_unmap_addr(out_meta, mapping),
+ dma_unmap_len(out_meta, len),
+ DMA_BIDIRECTIONAL);
+ return -1;
+ }
+ }
+
+ /* Attach buffers to TFD */
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
+ if (secondlen > 0)
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
+ secondlen, 0);
+
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ /* take back ownership of DMA buffer to enable update */
+ dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+ IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
+ le16_to_cpu(dev_cmd->hdr.sequence));
+ IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+ iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (is_agg)
+ iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
+ le16_to_cpu(tx_cmd->len));
+
+ dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_dev_tx(priv(trans),
+ &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &dev_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_txq_update_write_ptr(trans, txq);
+
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ trans->shrd->tid_data[sta_id][tid].seq_number =
+ seq_number;
+ }
+
+ /*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually.
+	 * All that remains to decide is whether the write pointer
+	 * should be updated now or deferred.
+ */
+ if (iwl_queue_space(q) < q->high_mark) {
+ if (wait_write_ptr) {
+ txq->need_update = 1;
+ iwl_txq_update_write_ptr(trans, txq);
+ } else {
+ iwl_stop_queue(trans, txq);
+ }
+ }
+ return 0;
+}
+
+static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
+{
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(bus(trans), CSR_RESET, 0);
+}
+
+static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ int err;
+
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
+ tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
+ iwl_irq_tasklet, (unsigned long)trans);
+
+ iwl_alloc_isr_ict(trans);
+
+ err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
+ DRV_NAME, trans);
+ if (err) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
+ iwl_free_isr_ict(trans);
+ return err;
+ }
+
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
+ return 0;
+}
+
+static int iwlagn_txq_check_empty(struct iwl_trans *trans,
+ int sta_id, u8 tid, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
+ struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];
+
+ lockdep_assert_held(&trans->shrd->sta_lock);
+
+ switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+		/* We are reclaiming the last packet of the
+		 * aggregated HW queue */
+ if ((txq_id == tid_data->agg.txq_id) &&
+ (q->read_ptr == q->write_ptr)) {
+ IWL_DEBUG_HT(trans,
+ "HW queue empty: continue DELBA flow\n");
+ iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+ tid_data->agg.state = IWL_AGG_OFF;
+ iwl_stop_tx_ba_trans_ready(priv(trans),
+ NUM_IWL_RXON_CTX,
+ sta_id, tid);
+ iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(trans,
+ "HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ iwl_start_tx_ba_trans_ready(priv(trans),
+ NUM_IWL_RXON_CTX,
+ sta_id, tid);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
+ int sta_id, int tid, int freed)
+{
+ lockdep_assert_held(&trans->shrd->sta_lock);
+
+ if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
+ else {
+ IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
+ freed);
+ trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
+ }
+}
+
+static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
+ int txq_id, int ssn, u32 status,
+ struct sk_buff_head *skbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+ enum iwl_agg_state agg_state;
+ /* n_bd is usually 256 => n_bd - 1 = 0xff */
+ int tfd_num = ssn & (txq->q.n_bd - 1);
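+	/* e.g. ssn 0x213 -> tfd_num 0x13: only the low 8 bits index the ring */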
+ int freed = 0;
+ bool cond;
+
+ txq->time_stamp = jiffies;
+
+ if (txq->sched_retry) {
+ agg_state =
+ trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
+ cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
+ } else {
+ cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
+ }
+
+ if (txq->q.read_ptr != tfd_num) {
+ IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
+ "scd_ssn=%d idx=%d txq=%d swq=%d\n",
+ ssn , tfd_num, txq_id, txq->swq_id);
+ freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
+ iwl_wake_queue(trans, txq);
+ }
+
+ iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
+ iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
+}
+
+static void iwl_trans_pcie_free(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_tx_free(trans);
+ iwl_trans_pcie_rx_free(trans);
+ free_irq(bus(trans)->irq, trans);
+ iwl_free_isr_ict(trans);
+ trans->shrd->trans = NULL;
+ kfree(trans);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+{
+ /*
+	 * This function is called when the system goes into suspend state.
+	 * mac80211 will call iwlagn_mac_stop() from its suspend handler
+	 * first, but since iwlagn_mac_stop() has no knowledge of who the
+	 * caller is, it will not call apm_ops.stop() to stop the DMA
+	 * operation. Call apm_ops.stop() here to make sure the DMA is
+	 * stopped.
+	 *
+	 * Of course, if WoWLAN has been configured, other things have
+	 * already been done instead.
+ */
+ if (!trans->shrd->wowlan) {
+ iwl_apm_stop(priv(trans));
+ } else {
+ iwl_disable_interrupts(trans);
+ iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ }
+
+ return 0;
+}
+
+static int iwl_trans_pcie_resume(struct iwl_trans *trans)
+{
+ bool hw_rfkill = false;
+
+ iwl_enable_interrupts(trans);
+
+ if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rfkill = true;
+
+ if (hw_rfkill)
+ set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+
+ iwl_set_hw_rfkill_state(priv(trans), hw_rfkill);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx)
+{
+ u8 ac, txq_id;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ for (ac = 0; ac < AC_NUM; ac++) {
+ txq_id = trans_pcie->ac_to_queue[ctx][ac];
+ IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
+ ac,
+ (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
+ ? "stopped" : "awake");
+ iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+ }
+}
+
+const struct iwl_trans_ops trans_ops_pcie;
+
+static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
+{
+ struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
+ sizeof(struct iwl_trans_pcie),
+ GFP_KERNEL);
+ if (iwl_trans) {
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
+ iwl_trans->ops = &trans_ops_pcie;
+ iwl_trans->shrd = shrd;
+ trans_pcie->trans = iwl_trans;
+ spin_lock_init(&iwl_trans->hcmd_lock);
+ }
+
+ return iwl_trans;
+}
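+
+/*
+ * Note: the single kzalloc() above implies the PCIe-private state lives in
+ * the tail of the iwl_trans allocation; IWL_TRANS_GET_PCIE_TRANS() is
+ * assumed to resolve to roughly:
+ *
+ *	(struct iwl_trans_pcie *)((char *)trans + sizeof(struct iwl_trans));
+ */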
+
+static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
+}
+
+#define IWL_FLUSH_WAIT_MS 2000
+
+static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ int cnt;
+ unsigned long now = jiffies;
+ int ret = 0;
+
+	/* waiting for all the tx frames to complete might take a while */
+ for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
+ if (cnt == trans->shrd->cmd_queue)
+ continue;
+ txq = &trans_pcie->txq[cnt];
+ q = &txq->q;
+ while (q->read_ptr != q->write_ptr && !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ msleep(1);
+
+ if (q->read_ptr != q->write_ptr) {
+			IWL_ERR(trans, "failed to flush all Tx FIFO queues\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * On every watchdog tick we check the (latest) time stamp. If it does not
+ * change during the timeout period and the queue is not empty, we reset the
+ * firmware.
+ */
+static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
+ struct iwl_queue *q = &txq->q;
+ unsigned long timeout;
+
+ if (q->read_ptr == q->write_ptr) {
+ txq->time_stamp = jiffies;
+ return 0;
+ }
+
+ timeout = txq->time_stamp +
+ msecs_to_jiffies(hw_params(trans).wd_timeout);
+
+ if (time_after(jiffies, timeout)) {
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
+ hw_params(trans).wd_timeout);
+ IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n",
+ q->read_ptr, q->write_ptr);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const char *get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+}
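+
+/*
+ * IWL_CMD(x) (from the debug headers) is assumed to expand to roughly
+ * "case x: return #x;", which is what lets get_fh_string() and
+ * get_csr_string() map register offsets back to their names.
+ */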
+
+int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ static const u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(bus(trans), fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IWL_ERR(trans, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IWL_ERR(trans, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(bus(trans), fh_tbl[i]));
+ }
+ return 0;
+}
+
+static const char *get_csr_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(CSR_HW_IF_CONFIG_REG);
+ IWL_CMD(CSR_INT_COALESCING);
+ IWL_CMD(CSR_INT);
+ IWL_CMD(CSR_INT_MASK);
+ IWL_CMD(CSR_FH_INT_STATUS);
+ IWL_CMD(CSR_GPIO_IN);
+ IWL_CMD(CSR_RESET);
+ IWL_CMD(CSR_GP_CNTRL);
+ IWL_CMD(CSR_HW_REV);
+ IWL_CMD(CSR_EEPROM_REG);
+ IWL_CMD(CSR_EEPROM_GP);
+ IWL_CMD(CSR_OTP_GP_REG);
+ IWL_CMD(CSR_GIO_REG);
+ IWL_CMD(CSR_GP_UCODE_REG);
+ IWL_CMD(CSR_GP_DRIVER_REG);
+ IWL_CMD(CSR_UCODE_DRV_GP1);
+ IWL_CMD(CSR_UCODE_DRV_GP2);
+ IWL_CMD(CSR_LED_REG);
+ IWL_CMD(CSR_DRAM_INT_TBL_REG);
+ IWL_CMD(CSR_GIO_CHICKEN_BITS);
+ IWL_CMD(CSR_ANA_PLL_CFG);
+ IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_DBG_HPET_MEM_REG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+void iwl_dump_csr(struct iwl_trans *trans)
+{
+ int i;
+ static const u32 csr_tbl[] = {
+ CSR_HW_IF_CONFIG_REG,
+ CSR_INT_COALESCING,
+ CSR_INT,
+ CSR_INT_MASK,
+ CSR_FH_INT_STATUS,
+ CSR_GPIO_IN,
+ CSR_RESET,
+ CSR_GP_CNTRL,
+ CSR_HW_REV,
+ CSR_EEPROM_REG,
+ CSR_EEPROM_GP,
+ CSR_OTP_GP_REG,
+ CSR_GIO_REG,
+ CSR_GP_UCODE_REG,
+ CSR_GP_DRIVER_REG,
+ CSR_UCODE_DRV_GP1,
+ CSR_UCODE_DRV_GP2,
+ CSR_LED_REG,
+ CSR_DRAM_INT_TBL_REG,
+ CSR_GIO_CHICKEN_BITS,
+ CSR_ANA_PLL_CFG,
+ CSR_HW_REV_WA_REG,
+ CSR_DBG_HPET_MEM_REG
+ };
+ IWL_ERR(trans, "CSR values:\n");
+ IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
+ "CSR_INT_PERIODIC_REG)\n");
+ for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
+ IWL_ERR(trans, " %25s: 0X%08x\n",
+ get_csr_string(csr_tbl[i]),
+ iwl_read32(bus(trans), csr_tbl[i]));
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, trans, \
+ &iwl_dbgfs_##name##_ops)) \
+ return -ENOMEM; \
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name) \
+static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name) \
+static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+
+static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = iwl_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ int ret;
+ const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
+
+ if (!trans_pcie->txq) {
+ IWL_ERR(trans, "txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
+ txq = &trans_pcie->txq[cnt];
+ q = &txq->q;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "hwq %.2d: read=%u write=%u stop=%d"
+ " swq_id=%#.2x (ac %d/hwq %d)\n",
+ cnt, q->read_ptr, q->write_ptr,
+ !!test_bit(cnt, trans_pcie->queue_stopped),
+ txq->swq_id, txq->swq_id & 3,
+ (txq->swq_id >> 2) & 0x1f);
+ if (cnt >= 4)
+ continue;
+ /* for the ACs, display the stop count too */
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " stop-count: %d\n",
+ atomic_read(&trans_pcie->queue_stop_count[cnt]));
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
+ rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
+ rxq->write);
+ pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -ENOMEM;
+
+ ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ }
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_log_event_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ u32 event_log_flag;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &event_log_flag) != 1)
+ return -EFAULT;
+ if (event_log_flag == 1)
+ iwl_dump_nic_event_log(trans, true, NULL, false);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+ int pos = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+		IWL_ERR(trans, "Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Interrupt Statistics Report:\n");
+
+ pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ isr_stats->hw);
+ pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ isr_stats->sw);
+ if (isr_stats->sw || isr_stats->hw) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ isr_stats->err_code);
+ }
+#ifdef CONFIG_IWLWIFI_DEBUG
+ pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ isr_stats->sch);
+ pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ isr_stats->alive);
+#endif
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ isr_stats->ctkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ isr_stats->wakeup);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Rx command responses:\t\t %u\n", isr_stats->rx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ isr_stats->tx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ isr_stats->unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ memset(isr_stats, 0, sizeof(*isr_stats));
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char buf[8];
+ int buf_size;
+ int csr;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &csr) != 1)
+ return -EFAULT;
+
+ iwl_dump_csr(trans);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ ret = pos = iwl_dump_fh(trans, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
+ kfree(buf);
+ }
+
+ return ret;
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_WRITE_FILE_OPS(csr);
+
+/*
+ * Create the debugfs files and directories
+ */
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
+{
+ DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+ return 0;
+}
+#else
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
+{ return 0; }
+
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+const struct iwl_trans_ops trans_ops_pcie = {
+ .alloc = iwl_trans_pcie_alloc,
+ .request_irq = iwl_trans_pcie_request_irq,
+ .start_device = iwl_trans_pcie_start_device,
+ .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
+ .stop_device = iwl_trans_pcie_stop_device,
+
+ .tx_start = iwl_trans_pcie_tx_start,
+ .wake_any_queue = iwl_trans_pcie_wake_any_queue,
+
+ .send_cmd = iwl_trans_pcie_send_cmd,
+
+ .tx = iwl_trans_pcie_tx,
+ .reclaim = iwl_trans_pcie_reclaim,
+
+ .tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
+ .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
+ .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
+
+ .kick_nic = iwl_trans_pcie_kick_nic,
+
+ .free = iwl_trans_pcie_free,
+ .stop_queue = iwl_trans_pcie_stop_queue,
+
+ .dbgfs_register = iwl_trans_pcie_dbgfs_register,
+
+ .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
+ .check_stuck_queue = iwl_trans_pcie_check_stuck_queue,
+
+#ifdef CONFIG_PM_SLEEP
+ .suspend = iwl_trans_pcie_suspend,
+ .resume = iwl_trans_pcie_resume,
+#endif
+};
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
deleted file mode 100644
index 47486029040..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
+++ /dev/null
@@ -1,979 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/gfp.h>
-
-#include "iwl-dev.h"
-#include "iwl-agn.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-trans-int-pcie.h"
-
-/******************************************************************************
- *
- * RX path functions
- *
- ******************************************************************************/
-
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC. These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC. The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_rx_queue_alloc() Allocates rx_free
- * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
-
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
- */
-static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
-{
- int s = q->read - q->write;
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some buffer to not confuse full and empty queue */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
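
For readers following the theory-of-operation comment above, the wrap-and-guard arithmetic in iwl_rx_queue_space() can be exercised in isolation. The sketch below is plain user-space C, not part of the patch; it only mirrors the 256-entry queue and the two-slot guard band.

#include <assert.h>

#define RX_QUEUE_SIZE 256

/* Same arithmetic as iwl_rx_queue_space(): wrapped difference minus a
 * two-slot guard so a full queue is never confused with an empty one. */
static int rx_space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	s -= 2;
	return s < 0 ? 0 : s;
}

int main(void)
{
	assert(rx_space(10, 10) == 254);	/* read == write: all slots but the guard */
	assert(rx_space(10, 9) == 0);		/* write one behind read: no room left   */
	assert(rx_space(0, 255) == 0);		/* same case across the wrap point       */
	return 0;
}
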
-
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q)
-{
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&q->lock, flags);
-
- if (q->need_update == 0)
- goto exit_unlock;
-
- if (priv->cfg->base_params->shadow_reg_enable) {
- /* shadow register enabled */
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual);
- } else {
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
-
- q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
- q->write_actual);
-
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
- q->write_actual);
- }
- }
- q->need_update = 0;
-
- exit_unlock:
- spin_unlock_irqrestore(&q->lock, flags);
-}
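
As a side note on the "& ~0x7" above: the device is only ever told the write index rounded down to a multiple of 8. A minimal standalone illustration (not kernel code):

#include <assert.h>

/* Same rounding as write_actual above: publish only multiples of 8. */
static unsigned int publishable(unsigned int write)
{
	return write & ~0x7u;
}

int main(void)
{
	assert(publishable(13) == 8);
	assert(publishable(15) == 8);
	assert(publishable(16) == 16);
	return 0;
}
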
-
-/**
- * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
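
The ">> 8" above works because, as the checks later in this file enforce, an RX buffer's DMA address must fit in 36 bits and be 256-byte aligned, so the RBD stores the address divided by 256. A standalone sketch of that invariant (illustration only, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Mirror of iwlagn_dma_addr2rbd_ptr() plus the BUG_ON() constraints
 * applied when a page is mapped: 36-bit, 256-byte-aligned address. */
static uint32_t dma_addr_to_rbd(uint64_t dma_addr)
{
	assert((dma_addr & 0xFF) == 0);		/* 256-byte aligned */
	assert((dma_addr >> 36) == 0);		/* fits in 36 bits  */
	return (uint32_t)(dma_addr >> 8);	/* stored as addr / 256 */
}

int main(void)
{
	assert(dma_addr_to_rbd(0x100) == 0x1);
	assert(dma_addr_to_rbd(0xABCDEF00ULL) == 0xABCDEF);
	return 0;
}
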
-
-/**
- * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->lock, flags);
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* The overwritten rxb must be a used one */
- rxb = rxq->queue[rxq->write];
- BUG_ON(rxb && rxb->page);
-
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
- rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except during initialization).
- */
-static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to alloc_pages with %s."
- "Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ?
- "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = dma_map_page(priv->bus->dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- DMA_FROM_DEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwlagn_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwlagn_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwlagn_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void iwlagn_rx_replenish_now(struct iwl_priv *priv)
-{
- iwlagn_rx_allocate(priv, GFP_ATOMIC);
-
- iwlagn_rx_queue_restock(priv);
-}
-
-void iwl_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwlagn_rx_replenish(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-static void iwl_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
- /* calculate total frames need to be restock after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- if (WARN_ON(rxb == NULL)) {
- i = (i + 1) & RX_QUEUE_MASK;
- continue;
- }
-
- rxq->queue[i] = NULL;
-
- dma_unmap_page(priv->bus->dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- DMA_FROM_DEVICE);
- pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
- i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
- (pkt->hdr.cmd != REPLY_RX) &&
- (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
- (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- iwl_rx_dispatch(priv, rxb);
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt). Because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking
- * trans_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- DMA_FROM_DEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so ucode won't assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwlagn_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwlagn_rx_replenish_now(priv);
- else
- iwlagn_rx_queue_restock(priv);
-}
-
-/* tasklet for iwlagn interrupt */
-void iwl_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta = 0;
- u32 handled = 0;
- unsigned long flags;
- u32 i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- */
- /* There is a hardware bug in the interrupt mask function such that some
-  * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
-  * they are disabled in the CSR_INT_MASK register. Furthermore, the
-  * ICT interrupt handling mechanism has another bug that might cause
-  * these unmasked interrupts to go undetected. We work around the
-  * hardware bugs here by ACKing all the possible interrupts so that
-  * interrupt coalescing can still be achieved.
- */
- iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);
-
- inta = priv->inta;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
- inta, inta_mask);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* interrupt saved in the inta variable; now we can reset priv->inta */
- priv->inta = 0;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
- "the frame/frames.\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* HW RF KILL switch toggled */
- if (inta & CSR_INT_BIT_RF_KILL) {
- int hw_rf_kill = 0;
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rf_kill = 1;
-
- IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
- hw_rf_kill ? "disable radio" : "enable radio");
-
- priv->isr_stats.rfkill++;
-
- /* The driver only loads the ucode once, when the interface is set up.
-  * The driver allows loading the ucode even if the radio
-  * is killed. Hence update the killswitch state here. The
-  * rfkill handler will take care of restarting if needed.
-  */
- if (!test_bit(STATUS_ALIVE, &priv->status)) {
- if (hw_rf_kill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
- }
-
- handled |= CSR_INT_BIT_RF_KILL;
- }
-
- /* Chip got too hot and stopped itself */
- if (inta & CSR_INT_BIT_CT_KILL) {
- IWL_ERR(priv, "Microcode CT kill error detected.\n");
- priv->isr_stats.ctkill++;
- handled |= CSR_INT_BIT_CT_KILL;
- }
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(priv, "Microcode SW error detected. "
- " Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /* uCode wakes up after power-down sleep */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
- for (i = 0; i < priv->hw_params.max_txq_num; i++)
- iwl_txq_update_write_ptr(priv, &priv->txq[i]);
-
- priv->isr_stats.wakeup++;
-
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
- * notifications from uCode come through here*/
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
- CSR_INT_BIT_RX_PERIODIC)) {
- IWL_DEBUG_ISR(priv, "Rx interrupt\n");
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- iwl_write32(priv, CSR_FH_INT_STATUS,
- CSR_FH_INT_RX_MASK);
- }
- if (inta & CSR_INT_BIT_RX_PERIODIC) {
- handled |= CSR_INT_BIT_RX_PERIODIC;
- iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
- }
- /* Sending an RX interrupt requires many steps to be done in
-  * the device:
-  * 1- write interrupt to current index in ICT table.
-  * 2- dma RX frame.
-  * 3- update RX shared data to indicate last write index.
-  * 4- send interrupt.
-  * This could lead to an RX race: the driver could receive an RX
-  * interrupt while the shared data does not yet reflect it;
-  * the periodic interrupt will detect any dangling Rx activity.
- */
-
- /* Disable periodic interrupt; we use it as just a one-shot. */
- iwl_write8(priv, CSR_INT_PERIODIC_REG,
- CSR_INT_PERIODIC_DIS);
- iwl_rx_handle(priv);
-
- /*
- * Enable periodic interrupt in 8 msec only if we received
- * real RX interrupt (instead of just periodic int), to catch
- * any dangling Rx interrupt. If it was just the periodic
- * interrupt, there was no dangling Rx activity, and no need
- * to extend the periodic interrupt; one-shot is enough.
- */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
- iwl_write8(priv, CSR_INT_PERIODIC_REG,
- CSR_INT_PERIODIC_ENA);
-
- priv->isr_stats.rx++;
- }
-
- /* This "Tx" DMA channel is used only for loading uCode */
- if (inta & CSR_INT_BIT_FH_TX) {
- iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
- IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
- priv->isr_stats.tx++;
- handled |= CSR_INT_BIT_FH_TX;
- /* Wake up uCode load routine, now that load is complete */
- priv->ucode_write_complete = 1;
- wake_up_interruptible(&priv->wait_command_queue);
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~(priv->inta_mask)) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_enable_interrupts(priv);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_enable_rfkill_int(priv);
-}
-
-/******************************************************************************
- *
- * ICT functions
- *
- ******************************************************************************/
-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
-
-/* Free dram table */
-void iwl_free_isr_ict(struct iwl_priv *priv)
-{
- if (priv->ict_tbl_vir) {
- dma_free_coherent(priv->bus->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- priv->ict_tbl_vir,
- priv->ict_tbl_dma);
- priv->ict_tbl_vir = NULL;
- memset(&priv->ict_tbl_dma, 0,
- sizeof(priv->ict_tbl_dma));
- memset(&priv->aligned_ict_tbl_dma, 0,
- sizeof(priv->aligned_ict_tbl_dma));
- }
-}
-
-
-/* Allocate the DRAM shared table; it is PAGE_SIZE aligned.
- * Also reset all data related to the ICT table interrupt.
- */
-int iwl_alloc_isr_ict(struct iwl_priv *priv)
-{
-
- /* allocate shared data table */
- priv->ict_tbl_vir =
- dma_alloc_coherent(priv->bus->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- &priv->ict_tbl_dma, GFP_KERNEL);
- if (!priv->ict_tbl_vir)
- return -ENOMEM;
-
- /* align table to PAGE_SIZE boundary */
- priv->aligned_ict_tbl_dma =
- ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
-
- IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
- (unsigned long long)priv->ict_tbl_dma,
- (unsigned long long)priv->aligned_ict_tbl_dma,
- (int)(priv->aligned_ict_tbl_dma -
- priv->ict_tbl_dma));
-
- priv->ict_tbl = priv->ict_tbl_vir +
- (priv->aligned_ict_tbl_dma -
- priv->ict_tbl_dma);
-
- IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
- priv->ict_tbl, priv->ict_tbl_vir,
- (int)(priv->aligned_ict_tbl_dma -
- priv->ict_tbl_dma));
-
- /* reset table and index to all 0 */
- memset(priv->ict_tbl_vir, 0,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
- priv->ict_index = 0;
-
- /* add periodic RX interrupt */
- priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
- return 0;
-}
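
The over-allocate-then-align pattern above (one extra PAGE_SIZE, then shifting the DMA and virtual addresses by the same offset) can be illustrated on its own. The sketch below is plain user-space C with a hypothetical DMA address, not part of the driver.

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	4096u
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t dma = 0x12345100;	/* hypothetical coherent DMA address */
	char buf[2 * PAGE_SIZE];	/* stands in for the over-allocated table */

	uint64_t aligned_dma = ALIGN_UP(dma, PAGE_SIZE);
	char *aligned_virt = buf + (aligned_dma - dma);

	assert((aligned_dma & (PAGE_SIZE - 1)) == 0);	/* page aligned now */
	assert(aligned_dma - dma < PAGE_SIZE);		/* fits in the extra page */
	assert(aligned_virt >= buf);			/* virtual ptr shifted by the same offset */
	return 0;
}
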
-
-/* The device is going up: inform it that it should use the ICT interrupt
- * table; we also need to tell the driver to start using the ICT interrupt.
- */
-int iwl_reset_ict(struct iwl_priv *priv)
-{
- u32 val;
- unsigned long flags;
-
- if (!priv->ict_tbl_vir)
- return 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
-
- memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
-
- val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
-
- val |= CSR_DRAM_INT_TBL_ENABLE;
- val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
-
- IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
- "aligned dma address %Lx\n",
- val,
- (unsigned long long)priv->aligned_ict_tbl_dma);
-
- iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
- priv->use_ict = true;
- priv->ict_index = 0;
- iwl_write32(priv, CSR_INT, priv->inta_mask);
- iwl_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-/* Device is going down; disable ICT interrupt usage */
-void iwl_disable_ict(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->use_ict = false;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static irqreturn_t iwl_isr(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- unsigned long flags;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_fh;
-#endif
- if (!priv)
- return IRQ_NONE;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
- "fh 0x%08x\n", inta, inta_mask, inta_fh);
- }
-#endif
-
- priv->inta |= inta;
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
- !priv->inta)
- iwl_enable_interrupts(priv);
-
- unplugged:
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only re-enable if disabled by irq and no tasklet was scheduled. */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
-
-/* Interrupt handler using the ICT table. With this handler the driver stops
- * using the INTA register to get the device's interrupts, since reading that
- * register is expensive. Instead, the device writes interrupts into the ICT
- * DRAM table, increments the index and then fires an interrupt to the driver.
- * The driver ORs all ICT table entries from the current index up to the first
- * entry with a 0 value; the result is the interrupt we need to service. The
- * driver then sets the entries back to 0 and updates the index.
- */
-irqreturn_t iwl_isr_ict(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 val = 0;
- unsigned long flags;
-
- if (!priv)
- return IRQ_NONE;
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (!priv->use_ict)
- return iwl_isr(irq, data);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here.
- */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!priv->ict_tbl[priv->ict_index]) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /* read all entries that are not 0, starting at ict_index */
- while (priv->ict_tbl[priv->ict_index]) {
-
- val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
- IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
- priv->ict_index,
- le32_to_cpu(
- priv->ict_tbl[priv->ict_index]));
- priv->ict_tbl[priv->ict_index] = 0;
- priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
- ICT_COUNT);
-
- }
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
- * so we use them to decide on the real state of the Rx bit.
- * In other words, bit 15 is set if bit 18 or bit 19 is set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
- inta, inta_mask, val);
-
- inta &= priv->inta_mask;
- priv->inta |= inta;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
- !priv->inta) {
- /* Re-enable interrupts if they were disabled by this handler and
-  * no tasklet was scheduled. If a tasklet was scheduled, we must
-  * not enable interrupts here; the tasklet will enable them.
-  */
- iwl_enable_interrupts(priv);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only re-enable if they were disabled by this irq handler.
- */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
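
To make the ICT flow above easier to follow outside the driver, here is a standalone sketch (plain C, simplified: no endianness handling, 4K pages assumed, hypothetical table contents) of the scan-OR-clear loop, the bit-15 workaround and the byte-spreading into the INTA layout.

#include <assert.h>
#include <stdint.h>

#define ICT_COUNT 1024	/* PAGE_SIZE / sizeof(u32) on 4K pages */

static uint32_t ict_collect(uint32_t *tbl, unsigned int *index)
{
	uint32_t val = 0;

	/* OR entries from the current index until a zero entry is hit,
	 * clearing each one and advancing the index with wrap-around. */
	while (tbl[*index]) {
		val |= tbl[*index];
		tbl[*index] = 0;
		*index = (*index + 1) % ICT_COUNT;
	}

	/* h/w bug workaround: bits 18/19 imply the Rx bit (bit 15) */
	if (val & 0xC0000)
		val |= 0x8000;

	/* spread the two ICT bytes into the 32-bit INTA layout */
	return (val & 0xff) | ((val & 0xff00) << 16);
}

int main(void)
{
	uint32_t tbl[ICT_COUNT] = { [3] = 0x80, [4] = 0x40000 };
	unsigned int idx = 3;

	/* 0x40000 sets bit 18, so bit 15 is forced on and lands in bit 31 */
	assert(ict_collect(tbl, &idx) == 0x80000080u);
	assert(idx == 5);
	return 0;
}
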
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
index 41f0de91400..1b20c4fb791 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -60,1113 +60,18 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-#include "iwl-dev.h"
-#include "iwl-trans.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-trans-int-pcie.h"
-/* TODO: remove unneeded includes once the transport layer tx_free is here */
-#include "iwl-agn.h"
-#include "iwl-core.h"
-
-static int iwl_trans_rx_alloc(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct device *dev = priv->bus->dev;
-
- memset(&priv->rxq, 0, sizeof(priv->rxq));
-
- spin_lock_init(&rxq->lock);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- if (WARN_ON(rxq->bd || rxq->rb_stts))
- return -EINVAL;
-
- /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- &rxq->bd_dma, GFP_KERNEL);
- if (!rxq->bd)
- goto err_bd;
- memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
-
- /*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma, GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err_rb_stts;
- memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
-
- return 0;
-
-err_rb_stts:
- dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
- rxq->bd = NULL;
-err_bd:
- return -ENOMEM;
-}
-
-static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- int i;
-
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- DMA_FROM_DEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-}
-
-static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
- struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
- rb_timeout = RX_RB_TIMEOUT;
-
- if (iwlagn_mod_params.amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
- * the credit mechanism in 5000 HW RX FIFO
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-}
-
-static int iwl_rx_init(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- int i, err;
- unsigned long flags;
-
- if (!rxq->bd) {
- err = iwl_trans_rx_alloc(priv);
- if (err)
- return err;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- iwl_trans_rxq_free_rx_bufs(priv);
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- iwlagn_rx_replenish(priv);
-
- iwl_trans_rx_hw_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
- rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(priv, rxq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-static void iwl_trans_rx_free(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- unsigned long flags;
-
- /* if rxq->bd is NULL, it means that nothing has been allocated;
-  * exit now */
- if (!rxq->bd) {
- IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- iwl_trans_rxq_free_rx_bufs(priv);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(priv->bus->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- else
- IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
- memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
- rxq->rb_stts = NULL;
-}
-
-static int iwl_trans_rx_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-}
-
-static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- if (WARN_ON(ptr->addr))
- return -EINVAL;
-
- ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
- &ptr->dma, GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
-static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
- int i;
-
- if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
- return -EINVAL;
-
- txq->q.n_window = slots_num;
-
- txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
- GFP_KERNEL);
- txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
- GFP_KERNEL);
-
- if (!txq->meta || !txq->cmd)
- goto error;
-
- for (i = 0; i < slots_num; i++) {
- txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
- GFP_KERNEL);
- if (!txq->cmd[i])
- goto error;
- }
-
- /* Alloc driver data array and TFD circular buffer */
- /* Driver private data, only for Tx (not command) queues,
- * not shared with device. */
- if (txq_id != priv->cmd_queue) {
- txq->txb = kzalloc(sizeof(txq->txb[0]) *
- TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
- if (!txq->txb) {
- IWL_ERR(priv, "kmalloc for auxiliary BD "
- "structures failed\n");
- goto error;
- }
- } else {
- txq->txb = NULL;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
- GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
- goto error;
- }
- txq->q.id = txq_id;
-
- return 0;
-error:
- kfree(txq->txb);
- txq->txb = NULL;
- /* since txq->cmd has been zeroed,
- * all non allocated cmd[i] will be NULL */
- if (txq->cmd)
- for (i = 0; i < slots_num; i++)
- kfree(txq->cmd[i]);
- kfree(txq->meta);
- kfree(txq->cmd);
- txq->meta = NULL;
- txq->cmd = NULL;
-
- return -ENOMEM;
-
-}
-
-static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int ret;
-
- txq->need_update = 0;
- memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
-
- /*
- * For the default queues 0-3, set up the swq_id
- * already -- all others need to get one later
- * (if they need one at all).
- */
- if (txq_id < 4)
- iwl_set_swq_id(txq, txq_id, txq_id);
-
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
- * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
- txq_id);
- if (ret)
- return ret;
-
- /*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- * Circular buffer (TFD queue in DRAM) physical base address */
- iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
-
- return 0;
-}
-
-/**
- * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
- */
-static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
-
- if (!q->n_bd)
- return;
-
- while (q->write_ptr != q->read_ptr) {
- /* The read_ptr needs to be bounded by q->n_window */
- iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-}
-
-/**
- * iwl_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct device *dev = priv->bus->dev;
- int i;
- if (WARN_ON(!txq))
- return;
-
- iwl_tx_queue_unmap(priv, txq_id);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i < txq->q.n_window; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd) {
- dma_free_coherent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
- memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
- }
-
- /* De-alloc array of per-TFD driver data */
- kfree(txq->txb);
- txq->txb = NULL;
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-
-/**
- * iwl_trans_tx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-static void iwl_trans_tx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- iwl_tx_queue_free(priv, txq_id);
- }
-
- kfree(priv->txq);
- priv->txq = NULL;
-
- iwlagn_free_dma_ptr(priv, &priv->kw);
-
- iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
-}
-
-/**
- * iwl_trans_tx_alloc - allocate TX context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-static int iwl_trans_tx_alloc(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
-
- /*It is not allowed to alloc twice, so warn when this happens.
- * We cannot rely on the previous allocation, so free and fail */
- if (WARN_ON(priv->txq)) {
- ret = -EINVAL;
- goto error;
- }
-
- ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
- priv->hw_params.scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
- goto error;
- }
-
- /* Alloc keep-warm buffer */
- ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(priv, "Keep Warm allocation failed\n");
- goto error;
- }
-
- priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
- priv->cfg->base_params->num_of_queues, GFP_KERNEL);
- if (!priv->txq) {
- IWL_ERR(priv, "Not enough memory for txq\n");
- ret = -ENOMEM;
- goto error;
- }
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == priv->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- }
-
- return 0;
-
-error:
- trans_tx_free(&priv->trans);
-
- return ret;
-}
-static int iwl_tx_init(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
- bool alloc = false;
-
- if (!priv->txq) {
- ret = iwl_trans_tx_alloc(priv);
- if (ret)
- goto error;
- alloc = true;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl_write_prph(priv, SCD_TXFACT, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == priv->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return 0;
-error:
- /*Upon error, free only if we allocated something */
- if (alloc)
- trans_tx_free(&priv->trans);
- return ret;
-}
-
-static void iwl_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-static int iwl_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_apm_init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl_set_pwr_vmain(priv);
-
- priv->cfg->lib->nic_config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- iwl_rx_init(priv);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (iwl_tx_init(priv))
- return -ENOMEM;
-
- if (priv->cfg->base_params->shadow_reg_enable) {
- /* enable shadow regs in HW */
- iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
- 0x800FFFFF);
- }
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-#define HW_READY_TIMEOUT (50)
-
-/* Note: returns poll_bit return value, which is >= 0 if success */
-static int iwl_set_hw_ready(struct iwl_priv *priv)
-{
- int ret;
-
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- HW_READY_TIMEOUT);
-
- IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
- return ret;
-}
-
-/* Note: returns standard 0/-ERROR code */
-static int iwl_trans_prepare_card_hw(struct iwl_priv *priv)
-{
- int ret;
-
- IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");
-
- ret = iwl_set_hw_ready(priv);
- if (ret >= 0)
- return 0;
-
- /* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
-
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
-
- if (ret < 0)
- return ret;
-
- /* HW should be ready by now, check again. */
- ret = iwl_set_hw_ready(priv);
- if (ret >= 0)
- return 0;
- return ret;
-}
-
-static int iwl_trans_start_device(struct iwl_priv *priv)
-{
- int ret;
-
- priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
-
- if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
- iwl_trans_prepare_card_hw(priv)) {
- IWL_WARN(priv, "Exit HW not ready\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (iwl_is_rfkill(priv)) {
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
- iwl_enable_interrupts(priv);
- return -ERFKILL;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- ret = iwl_nic_init(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to init nic\n");
- return ret;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- return 0;
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
- * must be called under priv->lock and mac access
- */
-static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_write_prph(priv, SCD_TXFACT, mask);
-}
-
-#define IWL_AC_UNSET -1
-
-struct queue_to_fifo_ac {
- s8 fifo, ac;
-};
-
-static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
- { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
- { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
- { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
- { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
- { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-};
-
-static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
- { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
- { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
- { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
- { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
- { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
- { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
- { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
- { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
- { IWL_TX_FIFO_BE_IPAN, 2, },
- { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
-};
-static void iwl_trans_tx_start(struct iwl_priv *priv)
-{
- const struct queue_to_fifo_ac *queue_to_fifo;
- struct iwl_rxon_context *ctx;
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
- /* reset context data memory */
- for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- /* reset tx status memory */
- for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_write_targ_mem(priv, a, 0);
-
- iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
- SCD_QUEUECHAIN_SEL_ALL(priv));
- iwl_write_prph(priv, SCD_AGGR_SEL, 0);
-
- /* initiate the queues */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
- iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- }
-
- iwl_write_prph(priv, SCD_INTERRUPT_MASK,
- IWL_MASK(0, priv->hw_params.max_txq_num));
-
- /* Activate all Tx DMA/FIFO channels */
- iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
-
- /* map queues to FIFOs */
- if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
- queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
- else
- queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
-
- iwl_trans_set_wr_ptrs(priv, priv->cmd_queue, 0);
-
- /* make sure all queues are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
- for_each_context(priv, ctx)
- ctx->last_tx_rejected = false;
-
- /* reset to 0 to enable all the queues first */
- priv->txq_ctx_active_msk = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
- BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
-
- for (i = 0; i < 10; i++) {
- int fifo = queue_to_fifo[i].fifo;
- int ac = queue_to_fifo[i].ac;
-
- iwl_txq_ctx_activate(priv, i);
-
- if (fifo == IWL_TX_FIFO_UNUSED)
- continue;
-
- if (ac != IWL_AC_UNSET)
- iwl_set_swq_id(&priv->txq[i], ac, i);
- iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Enable L1-Active */
- iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-}
-
-/**
- * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
- */
-static int iwl_trans_tx_stop(struct iwl_priv *priv)
-{
- int ch, txq_id;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&priv->lock, flags);
-
- iwl_trans_txq_set_sched(priv, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000))
- IWL_ERR(priv, "Failing on timeout while stopping"
- " DMA channel %d [0x%08x]", ch,
- iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!priv->txq) {
- IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
- return 0;
- }
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- iwl_tx_queue_unmap(priv, txq_id);
-
- return 0;
-}
-
-static void iwl_trans_stop_device(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- trans_sync_irq(&priv->trans);
-
- /* device going down, Stop using ICT table */
- iwl_disable_ict(priv);
-
- /*
- * If a HW restart happens during firmware loading,
- * then the firmware loading might call this function
- * and later it might be called again due to the
- * restart. So don't process again if the device is
- * already dead.
- */
- if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
- iwl_trans_tx_stop(priv);
- iwl_trans_rx_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_write_prph(priv, APMG_CLK_DIS_REG,
- APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
- }
-
- /* Make sure (redundant) we've released our request to stay awake */
- iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /* Stop the device, and put it in low power state */
- iwl_apm_stop(priv);
-}
-
-static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv,
- int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_device_cmd *dev_cmd;
-
- if (unlikely(iwl_queue_space(q) < q->high_mark))
- return NULL;
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- dev_cmd = txq->cmd[q->write_ptr];
- memset(dev_cmd, 0, sizeof(*dev_cmd));
- dev_cmd->hdr.cmd = REPLY_TX;
- dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
- return &dev_cmd->cmd.tx;
-}
-
-static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
- struct iwl_cmd_meta *out_meta;
-
- dma_addr_t phys_addr = 0;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, firstlen, secondlen;
- u8 wait_write_ptr = 0;
- u8 hdr_len = ieee80211_hdrlen(fc);
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = ctx;
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->meta[q->write_ptr];
-
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- firstlen = (len + 3) & ~3;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (firstlen != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = dma_map_single(priv->bus->dev,
- &dev_cmd->hdr, firstlen,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
- return -1;
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
-
- if (!ieee80211_has_morefrags(fc)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = skb->len - hdr_len;
- if (secondlen > 0) {
- phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
- secondlen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
- dma_unmap_single(priv->bus->dev,
- dma_unmap_addr(out_meta, mapping),
- dma_unmap_len(out_meta, len),
- DMA_BIDIRECTIONAL);
- return -1;
- }
- }
-
- /* Attach buffers to TFD */
- iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
- if (secondlen > 0)
- iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
- secondlen, 0);
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- /* take back ownership of DMA buffer to enable update */
- dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(dev_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
- /* Set up entry for this TFD in Tx byte-count array */
- if (ampdu)
- iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
-
- dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_dev_tx(priv,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &dev_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
- */
- if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
- if (wait_write_ptr) {
- txq->need_update = 1;
- iwl_txq_update_write_ptr(priv, txq);
- } else {
- iwl_stop_queue(priv, txq);
- }
- }
- return 0;
-}
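The comments in the removed iwl_trans_tx() above describe how the first TFD chunk (Tx command plus MAC header) is rounded up to a dword boundary and how the device is told about the resulting pad. A minimal standalone sketch of just that arithmetic (names and values are illustrative, not taken from the driver):

#include <stdio.h>

/* Round a length up to the next 4-byte (dword) boundary - the same
 * arithmetic used for firstlen in the removed Tx path above. */
static unsigned int round_up_dword(unsigned int len)
{
	return (len + 3) & ~3u;
}

int main(void)
{
	unsigned int len = 46;	/* any length that is not dword aligned */
	unsigned int firstlen = round_up_dword(len);

	/* A non-zero pad is what TX_CMD_FLG_MH_PAD_MSK tells the device about. */
	printf("len=%u firstlen=%u pad=%u\n", len, firstlen, firstlen - len);
	return 0;
}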
-
-static void iwl_trans_kick_nic(struct iwl_priv *priv)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
-}
-
-static void iwl_trans_sync_irq(struct iwl_priv *priv)
-{
- /* wait to make sure we flush pending tasklet*/
- synchronize_irq(priv->bus->irq);
- tasklet_kill(&priv->irq_tasklet);
-}
-
-static void iwl_trans_free(struct iwl_priv *priv)
-{
- free_irq(priv->bus->irq, priv);
- iwl_free_isr_ict(priv);
-}
-
-static const struct iwl_trans_ops trans_ops = {
- .start_device = iwl_trans_start_device,
- .prepare_card_hw = iwl_trans_prepare_card_hw,
- .stop_device = iwl_trans_stop_device,
-
- .tx_start = iwl_trans_tx_start,
-
- .rx_free = iwl_trans_rx_free,
- .tx_free = iwl_trans_tx_free,
-
- .send_cmd = iwl_send_cmd,
- .send_cmd_pdu = iwl_send_cmd_pdu,
-
- .get_tx_cmd = iwl_trans_get_tx_cmd,
- .tx = iwl_trans_tx,
-
- .txq_agg_disable = iwl_trans_txq_agg_disable,
- .txq_agg_setup = iwl_trans_txq_agg_setup,
-
- .kick_nic = iwl_trans_kick_nic,
-
- .sync_irq = iwl_trans_sync_irq,
- .free = iwl_trans_free,
-};
+#include "iwl-trans.h"
-int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv)
+int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
+ u32 flags, u16 len, const void *data)
{
- int err;
-
- priv->trans.ops = &trans_ops;
- priv->trans.priv = priv;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl_irq_tasklet, (unsigned long)priv);
-
- iwl_alloc_isr_ict(priv);
-
- err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED,
- DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
- iwl_free_isr_ict(priv);
- return err;
- }
-
- INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
- return 0;
+ return iwl_trans_send_cmd(trans, &cmd);
}
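The new iwl_trans_send_cmd_pdu() wraps a single payload into a one-chunk iwl_host_cmd. A hedged usage sketch (the helper name and parameters below are hypothetical; only the calling convention mirrors the patch):

/*
 * Illustrative only: a caller that used to go through trans_send_cmd_pdu()
 * with a priv pointer now passes the iwl_trans handle. The command id and
 * payload are whatever the caller already has; nothing here is new API.
 */
static int example_send_pdu(struct iwl_trans *trans, u8 cmd_id,
			    const void *payload, u16 payload_len)
{
	/* Synchronous, single-chunk command; the pdu helper wraps the
	 * payload into a one-entry iwl_host_cmd internally. */
	return iwl_trans_send_cmd_pdu(trans, cmd_id, CMD_SYNC,
				      payload_len, payload);
}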
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 7993aa7ae66..c5923125c3f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -63,163 +63,289 @@
#ifndef __iwl_trans_h__
#define __iwl_trans_h__
+#include <linux/debugfs.h>
+#include <linux/skbuff.h>
+
+#include "iwl-shared.h"
+#include "iwl-commands.h"
+
/* This file includes the declarations that are exported from the transport
* layer */
struct iwl_priv;
-struct iwl_rxon_context;
-struct iwl_host_cmd;
+struct iwl_shared;
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
+
+enum {
+ CMD_SYNC = 0,
+ CMD_ASYNC = BIT(0),
+ CMD_WANT_SKB = BIT(1),
+ CMD_ON_DEMAND = BIT(2),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct iwl_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for commands that
+ * aren't fully copied and use other TFD space.
+ */
+struct iwl_device_cmd {
+ struct iwl_cmd_header hdr; /* uCode API */
+ u8 payload[DEF_CMD_PAYLOAD_SIZE];
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
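As a sanity-check sketch of the size relationship described above (not part of the patch): the packed device command is exactly the uCode command header followed by DEF_CMD_PAYLOAD_SIZE bytes, which is also what TFD_MAX_PAYLOAD_SIZE evaluates to:

/* Sketch only - compile-time check of the layout described above. */
#include <linux/kernel.h>

static inline void iwl_device_cmd_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct iwl_device_cmd) !=
		     sizeof(struct iwl_cmd_header) + DEF_CMD_PAYLOAD_SIZE);
	BUILD_BUG_ON(TFD_MAX_PAYLOAD_SIZE != sizeof(struct iwl_device_cmd));
}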
+
+#define IWL_MAX_CMD_TFDS 2
+
+enum iwl_hcmd_dataflag {
+ IWL_HCMD_DFL_NOCOPY = BIT(0),
+};
+
+/**
+ * struct iwl_host_cmd - Host command to the uCode
+ * @data: array of chunks that compose the data of the host command
+ * @reply_page: pointer to the page that holds the response to the host command
+ * @handler_status: return value of the handler of the command
+ * (put in setup_rx_handlers) - valid for SYNC mode only
+ * @flags: can be CMD_*; note that CMD_WANT_SKB is incompatible with CMD_ASYNC
+ * @len: array of the lengths of the chunks in @data
+ * @dataflags: IWL_HCMD_DFL_* flags for each chunk in @data
+ * @id: id of the host command
+ */
+struct iwl_host_cmd {
+ const void *data[IWL_MAX_CMD_TFDS];
+ unsigned long reply_page;
+ int handler_status;
+
+ u32 flags;
+ u16 len[IWL_MAX_CMD_TFDS];
+ u8 dataflags[IWL_MAX_CMD_TFDS];
+ u8 id;
+};
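A hedged example of filling struct iwl_host_cmd for a synchronous two-chunk command, with the second chunk flagged IWL_HCMD_DFL_NOCOPY so it is not copied into the command buffer (function and parameter names are illustrative):

/*
 * Sketch: a synchronous two-chunk host command. The first (small) chunk
 * is copied into the command queue; the second is handed over as a
 * NOCOPY chunk. All payload names are the caller's; nothing here is new API.
 */
static int example_send_two_chunks(struct iwl_trans *trans, u8 cmd_id,
				   const void *hdr, u16 hdr_len,
				   const void *table, u16 table_len)
{
	struct iwl_host_cmd cmd = {
		.id = cmd_id,
		.flags = CMD_SYNC,
		.data = { hdr, table, },
		.len = { hdr_len, table_len, },
		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
	};

	return iwl_trans_send_cmd(trans, &cmd);
}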
/**
* struct iwl_trans_ops - transport specific operations
+ * @alloc: allocates the metadata (not the queues themselves)
+ * @request_irq: requests the IRQ - will be called before the FW load in the probe flow
* @start_device: allocates and inits all the resources for the transport
* layer.
* @prepare_card_hw: claim ownership of the HW. Will be called during
* probe.
* @tx_start: starts and configures all the Tx fifos - usually done once the fw
* is alive.
+ * @wake_any_queue: wake all the queues of a specific context IWL_RXON_CTX_*
* @stop_device: stops the whole device (embedded CPU put into reset)
- * @rx_free: frees the rx memory
- * @tx_free: frees the tx memory
* @send_cmd: send a host command
- * @send_cmd_pdu:send a host command: flags can be CMD_*
- * @get_tx_cmd: returns a pointer to a new Tx cmd for the upper layer use
* @tx: send an skb
- * @txq_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
+ * @reclaim: frees packets up to ssn. Returns a list of the freed packets.
+ * @tx_agg_alloc: allocate resources for a TX BA session
+ * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
* ready and a successful ADDBA response has been received.
- * @txq_agg_disable: de-configure a Tx queue to send AMPDUs
+ * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
* @kick_nic: remove the RESET from the embedded CPU and let it run
- * @sync_irq: the upper layer will typically disable interrupt and call this
- * handler. After this handler returns, it is guaranteed that all
- * the ISR / tasklet etc... have finished running and the transport
- * layer shall not pass any Rx.
* @free: release all the resources of the transport layer itself, such as
* the irq, tasklet, etc...
+ * @stop_queue: stop a specific queue
+ * @check_stuck_queue: check if a specific queue is stuck
+ * @wait_tx_queue_empty: wait until all tx queues are empty
+ * @dbgfs_register: add the dbgfs files under this directory. Files will be
+ * automatically deleted.
+ * @suspend: stop the device unless WoWLAN is configured
+ * @resume: resume activity of the device
*/
struct iwl_trans_ops {
- int (*start_device)(struct iwl_priv *priv);
- int (*prepare_card_hw)(struct iwl_priv *priv);
- void (*stop_device)(struct iwl_priv *priv);
- void (*tx_start)(struct iwl_priv *priv);
- void (*tx_free)(struct iwl_priv *priv);
- void (*rx_free)(struct iwl_priv *priv);
+ struct iwl_trans *(*alloc)(struct iwl_shared *shrd);
+ int (*request_irq)(struct iwl_trans *iwl_trans);
+ int (*start_device)(struct iwl_trans *trans);
+ int (*prepare_card_hw)(struct iwl_trans *trans);
+ void (*stop_device)(struct iwl_trans *trans);
+ void (*tx_start)(struct iwl_trans *trans);
+
+ void (*wake_any_queue)(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx);
- int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+ int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
- int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
- const void *data);
- struct iwl_tx_cmd * (*get_tx_cmd)(struct iwl_priv *priv, int txq_id);
- int (*tx)(struct iwl_priv *priv, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
- struct iwl_rxon_context *ctx);
+ int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
+ u8 sta_id);
+ void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
+ int txq_id, int ssn, u32 status,
+ struct sk_buff_head *skbs);
- int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo);
- void (*txq_agg_setup)(struct iwl_priv *priv, int sta_id, int tid,
- int frame_limit);
+ int (*tx_agg_disable)(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid);
+ int (*tx_agg_alloc)(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id, int tid,
+ u16 *ssn);
+ void (*tx_agg_setup)(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id, int tid,
+ int frame_limit);
- void (*kick_nic)(struct iwl_priv *priv);
+ void (*kick_nic)(struct iwl_trans *trans);
- void (*sync_irq)(struct iwl_priv *priv);
- void (*free)(struct iwl_priv *priv);
+ void (*free)(struct iwl_trans *trans);
+
+ void (*stop_queue)(struct iwl_trans *trans, int q);
+
+ int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
+ int (*check_stuck_queue)(struct iwl_trans *trans, int q);
+ int (*wait_tx_queue_empty)(struct iwl_trans *trans);
+#ifdef CONFIG_PM_SLEEP
+ int (*suspend)(struct iwl_trans *trans);
+ int (*resume)(struct iwl_trans *trans);
+#endif
};
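For orientation only, a back-end fills this ops table with its own handlers and exposes it the way trans_ops_pcie is exposed at the bottom of this header; the stubs below are hypothetical, not the PCIe implementation:

/*
 * Sketch of how a back-end wires up iwl_trans_ops. The two stub handlers
 * are hypothetical; the real, complete table (trans_ops_pcie) lives in
 * the PCIe transport files.
 */
static void example_kick_nic(struct iwl_trans *trans)
{
	/* a real back-end clears CSR_RESET here to let the embedded CPU run */
}

static void example_stop_queue(struct iwl_trans *trans, int q)
{
	/* a real back-end stops Tx queue q here */
}

static const struct iwl_trans_ops trans_ops_example = {
	.kick_nic	= example_kick_nic,
	.stop_queue	= example_stop_queue,
	/* a real implementation must fill in every remaining op */
};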
+/**
+ * struct iwl_trans - transport common data
+ * @ops: pointer to iwl_trans_ops
+ * @shrd: pointer to iwl_shared, which holds shared data from the upper layer
+ * @hcmd_lock: protects HCMD
+ */
struct iwl_trans {
const struct iwl_trans_ops *ops;
- struct iwl_priv *priv;
+ struct iwl_shared *shrd;
+ spinlock_t hcmd_lock;
+
+	/* pointer to the trans-specific struct */
+	/* Ensure that this pointer will always be aligned to the size of a pointer */
+ char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
};
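The zero-length, pointer-aligned trans_specific[] area is where a back-end keeps its private state, allocated together with struct iwl_trans. A hedged sketch of that layout (the struct, macro and allocation below are illustrative, not the PCIe transport's actual names):

/*
 * Sketch: back-end private data lives directly after struct iwl_trans and
 * is reached through the pointer-aligned trans_specific[] area.
 */
struct example_trans_priv {
	int irq_requested;
	/* ... queues, ISR state, ... */
};

#define EXAMPLE_TRANS_GET_PRIV(_trans) \
	((struct example_trans_priv *)((_trans)->trans_specific))

/* Allocation would reserve room for both structures in one block, e.g.:
 *	trans = kzalloc(sizeof(struct iwl_trans) +
 *			sizeof(struct example_trans_priv), GFP_KERNEL);
 */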
-static inline int trans_start_device(struct iwl_trans *trans)
+static inline int iwl_trans_request_irq(struct iwl_trans *trans)
{
- return trans->ops->start_device(trans->priv);
+ return trans->ops->request_irq(trans);
}
-static inline int trans_prepare_card_hw(struct iwl_trans *trans)
+static inline int iwl_trans_start_device(struct iwl_trans *trans)
{
- return trans->ops->prepare_card_hw(trans->priv);
+ return trans->ops->start_device(trans);
}
-static inline void trans_stop_device(struct iwl_trans *trans)
+static inline int iwl_trans_prepare_card_hw(struct iwl_trans *trans)
{
- trans->ops->stop_device(trans->priv);
+ return trans->ops->prepare_card_hw(trans);
}
-static inline void trans_tx_start(struct iwl_trans *trans)
+static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
- trans->ops->tx_start(trans->priv);
+ trans->ops->stop_device(trans);
}
-static inline void trans_rx_free(struct iwl_trans *trans)
+static inline void iwl_trans_tx_start(struct iwl_trans *trans)
{
- trans->ops->rx_free(trans->priv);
+ trans->ops->tx_start(trans);
}
-static inline void trans_tx_free(struct iwl_trans *trans)
+static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx)
{
- trans->ops->tx_free(trans->priv);
+ trans->ops->wake_any_queue(trans, ctx);
}
-static inline int trans_send_cmd(struct iwl_trans *trans,
+
+static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
- return trans->ops->send_cmd(trans->priv, cmd);
+ return trans->ops->send_cmd(trans, cmd);
+}
+
+int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
+ u32 flags, u16 len, const void *data);
+
+static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
+ u8 sta_id)
+{
+ return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id);
}
-static inline int trans_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
- u16 len, const void *data)
+static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
+ int tid, int txq_id, int ssn, u32 status,
+ struct sk_buff_head *skbs)
{
- return trans->ops->send_cmd_pdu(trans->priv, id, flags, len, data);
+ trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
}
-static inline struct iwl_tx_cmd *trans_get_tx_cmd(struct iwl_trans *trans,
- int txq_id)
+static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx,
+ int sta_id, int tid)
{
- return trans->ops->get_tx_cmd(trans->priv, txq_id);
+ return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
}
-static inline int trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
- struct iwl_rxon_context *ctx)
+static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx,
+ int sta_id, int tid, u16 *ssn)
{
- return trans->ops->tx(trans->priv, skb, tx_cmd, txq_id, fc, ampdu, ctx);
+ return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
}
-static inline int trans_txq_agg_disable(struct iwl_trans *trans, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
+
+static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx,
+ int sta_id, int tid,
+ int frame_limit)
{
- return trans->ops->txq_agg_disable(trans->priv, txq_id,
- ssn_idx, tx_fifo);
+ trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit);
}
-static inline void trans_txq_agg_setup(struct iwl_trans *trans, int sta_id,
- int tid, int frame_limit)
+static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
{
- trans->ops->txq_agg_setup(trans->priv, sta_id, tid, frame_limit);
+ trans->ops->kick_nic(trans);
}
-static inline void trans_kick_nic(struct iwl_trans *trans)
+static inline void iwl_trans_free(struct iwl_trans *trans)
{
- trans->ops->kick_nic(trans->priv);
+ trans->ops->free(trans);
}
-static inline void trans_sync_irq(struct iwl_trans *trans)
+static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
{
- trans->ops->sync_irq(trans->priv);
+ trans->ops->stop_queue(trans, q);
}
-static inline void trans_free(struct iwl_trans *trans)
+static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
+{
+ return trans->ops->wait_tx_queue_empty(trans);
+}
+
+static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
+{
+ return trans->ops->check_stuck_queue(trans, q);
+}
+static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
{
- trans->ops->free(trans->priv);
+ return trans->ops->dbgfs_register(trans, dir);
}
-int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv);
+#ifdef CONFIG_PM_SLEEP
+static inline int iwl_trans_suspend(struct iwl_trans *trans)
+{
+ return trans->ops->suspend(trans);
+}
-/*TODO: this functions should NOT be exported from trans module - export it
- * until the reclaim flow will be brought to the transport module too */
+static inline int iwl_trans_resume(struct iwl_trans *trans)
+{
+ return trans->ops->resume(trans);
+}
+#endif
-struct iwl_tx_queue;
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
+/*****************************************************
+* Transport layer implementations
+******************************************************/
+extern const struct iwl_trans_ops trans_ops_pcie;
#endif /* __iwl_trans_h__ */