Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans-pcie.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-pcie.c  918
1 files changed, 663 insertions, 255 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 324d06dfb69..b4f796c82e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,8 +60,11 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
+#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
@@ -73,12 +76,18 @@
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
+ (((1<<cfg(trans)->base_params->num_of_queues) - 1) &\
+ (~(1<<(trans_pcie)->cmd_queue)))
+
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct device *dev = bus(trans)->dev;
+ struct device *dev = trans->dev;
memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
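
The two macros added at the top of this hunk are plain bit arithmetic: IWL_MASK(lo, hi) builds a mask with bits lo..hi set, and SCD_QUEUECHAIN_SEL_ALL() selects every TX queue except the command queue. A standalone sketch, with queue numbers that are illustrative only and not taken from this patch:

#include <stdio.h>

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

int main(void)
{
	/* illustrative values only */
	unsigned int num_of_queues = 20, cmd_queue = 9;
	unsigned int chain_sel = ((1u << num_of_queues) - 1) & ~(1u << cmd_queue);

	printf("IWL_MASK(0, 7) = 0x%02x\n", IWL_MASK(0, 7)); /* 0xff: bits 0..7 set */
	printf("chain_sel      = 0x%05x\n", chain_sel);      /* all queues except the cmd queue */
	return 0;
}
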
@@ -122,7 +131,7 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) {
- dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
+ dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
PAGE_SIZE << hw_params(trans).rx_page_order,
DMA_FROM_DEVICE);
__free_pages(rxq->pool[i].page,
@@ -146,17 +155,17 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
/* Stop Rx DMA */
- iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
/* Reset driver's Rx queue write index */
- iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
/* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
(u32)(rxq->bd_dma >> 8));
/* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
rxq->rb_stts_dma >> 4);
/* Enable Rx DMA
@@ -167,7 +176,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
* RB timeout 0x10
* 256 RBDs
*/
- iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
@@ -177,7 +186,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
/* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
static int iwl_rx_init(struct iwl_trans *trans)
@@ -215,10 +224,10 @@ static int iwl_rx_init(struct iwl_trans *trans)
iwl_trans_rx_hw_init(trans, rxq);
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
rxq->need_update = 1;
iwl_rx_queue_update_write_ptr(trans, rxq);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return 0;
}
@@ -242,13 +251,13 @@ static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
iwl_trans_rxq_free_rx_bufs(trans);
spin_unlock_irqrestore(&rxq->lock, flags);
- dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
rxq->bd = NULL;
if (rxq->rb_stts)
- dma_free_coherent(bus(trans)->dev,
+ dma_free_coherent(trans->dev,
sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
else
@@ -261,8 +270,8 @@ static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
/* stop Rx DMA */
- iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
@@ -272,7 +281,7 @@ static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
if (WARN_ON(ptr->addr))
return -EINVAL;
- ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
+ ptr->addr = dma_alloc_coherent(trans->dev, size,
&ptr->dma, GFP_KERNEL);
if (!ptr->addr)
return -ENOMEM;
@@ -286,7 +295,7 @@ static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
if (unlikely(!ptr->addr))
return;
- dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
+ dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
memset(ptr, 0, sizeof(*ptr));
}
@@ -296,6 +305,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
{
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
int i;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
return -EINVAL;
@@ -308,7 +318,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
if (!txq->meta || !txq->cmd)
goto error;
- if (txq_id == trans->shrd->cmd_queue)
+ if (txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++) {
txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
GFP_KERNEL);
@@ -319,7 +329,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
/* Alloc driver data array and TFD circular buffer */
/* Driver private data, only for Tx (not command) queues,
* not shared with device. */
- if (txq_id != trans->shrd->cmd_queue) {
+ if (txq_id != trans_pcie->cmd_queue) {
txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
GFP_KERNEL);
if (!txq->skbs) {
@@ -333,7 +343,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
- txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
&txq->q.dma_addr, GFP_KERNEL);
if (!txq->tfds) {
IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
@@ -347,7 +357,7 @@ error:
txq->skbs = NULL;
/* since txq->cmd has been zeroed,
* all non allocated cmd[i] will be NULL */
- if (txq->cmd && txq_id == trans->shrd->cmd_queue)
+ if (txq->cmd && txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++)
kfree(txq->cmd[i]);
kfree(txq->meta);
@@ -385,11 +395,13 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
if (ret)
return ret;
+ spin_lock_init(&txq->lock);
+
/*
* Tell nic where to find circular buffer of Tx Frame Descriptors for
* given Tx queue, and enable the DMA channel used for that queue.
* Circular buffer (TFD queue in DRAM) physical base address */
- iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
txq->q.dma_addr >> 8);
return 0;
@@ -404,8 +416,6 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
enum dma_data_direction dma_dir;
- unsigned long flags;
- spinlock_t *lock;
if (!q->n_bd)
return;
@@ -413,22 +423,19 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
/* In the command queue, all the TBs are mapped as BIDI
* so unmap them as such.
*/
- if (txq_id == trans->shrd->cmd_queue) {
+ if (txq_id == trans_pcie->cmd_queue)
dma_dir = DMA_BIDIRECTIONAL;
- lock = &trans->hcmd_lock;
- } else {
+ else
dma_dir = DMA_TO_DEVICE;
- lock = &trans->shrd->sta_lock;
- }
- spin_lock_irqsave(lock, flags);
+ spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
/* The read_ptr needs to bound by q->n_window */
iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
dma_dir);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
- spin_unlock_irqrestore(lock, flags);
+ spin_unlock_bh(&txq->lock);
}
/**
@@ -443,7 +450,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- struct device *dev = bus(trans)->dev;
+ struct device *dev = trans->dev;
int i;
if (WARN_ON(!txq))
return;
@@ -452,7 +459,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
/* De-alloc array of command/tx buffers */
- if (txq_id == trans->shrd->cmd_queue)
+ if (txq_id == trans_pcie->cmd_queue)
for (i = 0; i < txq->q.n_window; i++)
kfree(txq->cmd[i]);
@@ -490,7 +497,7 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
/* Tx queues */
if (trans_pcie->txq) {
for (txq_id = 0;
- txq_id < hw_params(trans).max_txq_num; txq_id++)
+ txq_id < cfg(trans)->base_params->num_of_queues; txq_id++)
iwl_tx_queue_free(trans, txq_id);
}
@@ -515,7 +522,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
+ u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues *
sizeof(struct iwlagn_scd_bc_tbl);
/*It is not allowed to alloc twice, so warn when this happens.
@@ -539,7 +546,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
goto error;
}
- trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
+ trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues,
sizeof(struct iwl_tx_queue), GFP_KERNEL);
if (!trans_pcie->txq) {
IWL_ERR(trans, "Not enough memory for txq\n");
@@ -548,8 +555,9 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
}
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
- slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
slots_num, txq_id);
@@ -581,20 +589,21 @@ static int iwl_tx_init(struct iwl_trans *trans)
alloc = true;
}
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Turn off all Tx DMA fifos */
- iwl_write_prph(bus(trans), SCD_TXFACT, 0);
+ iwl_write_prph(trans, SCD_TXFACT, 0);
/* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
trans_pcie->kw.dma >> 4);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
- slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
slots_num, txq_id);
@@ -619,49 +628,220 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
* to set power to V_AUX, do:
if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
~APMG_PS_CTRL_MSK_PWR_SRC);
*/
- iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
~APMG_PS_CTRL_MSK_PWR_SRC);
}
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
+{
+ int pos;
+ u16 pci_lnk_ctl;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ struct pci_dev *pci_dev = trans_pcie->pci_dev;
+
+ pos = pci_pcie_cap(pci_dev);
+ pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ return pci_lnk_ctl;
+}
+
+static void iwl_apm_config(struct iwl_trans *trans)
+{
+ /*
+ * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+ * If so (likely), disable L0S, so device moves directly L0->L1;
+ * costs negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ u16 lctl = iwl_pciexp_link_ctrl(trans);
+
+ if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+ PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+ dev_printk(KERN_INFO, trans->dev,
+ "L1 Enabled; Disabling L0S\n");
+ } else {
+ /* L1-ASPM disabled; enable(!) L0S */
+ iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+ dev_printk(KERN_INFO, trans->dev,
+ "L1 Disabled; Enabling L0S\n");
+ }
+ trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+}
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int iwl_apm_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = 0;
+ IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ iwl_apm_config(trans);
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (cfg(trans)->base_params->pll_cfg_val)
+ iwl_set_bit(trans, CSR_ANA_PLL_CFG,
+ cfg(trans)->base_params->pll_cfg_val);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+ goto out;
+ }
+
+ /*
+ * Enable DMA clock and wait for it to stabilize.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+ * do not disable clocks. This preserves any hardware bits already
+ * set by default in "CLK_CTRL_REG" after reset.
+ */
+ iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+ set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+
+out:
+ return ret;
+}
+
+static int iwl_apm_stop_master(struct iwl_trans *trans)
+{
+ int ret = 0;
+
+ /* stop device's busmaster DMA activity */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (ret)
+ IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
+
+ IWL_DEBUG_INFO(trans, "stop master\n");
+
+ return ret;
+}
+
+static void iwl_apm_stop(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+ clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+
+ /* Stop device's DMA activity */
+ iwl_apm_stop_master(trans);
+
+ /* Reset the entire device */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
static int iwl_nic_init(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
/* nic_init */
- spin_lock_irqsave(&trans->shrd->lock, flags);
- iwl_apm_init(priv(trans));
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ iwl_apm_init(trans);
/* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(bus(trans), CSR_INT_COALESCING,
+ iwl_write8(trans, CSR_INT_COALESCING,
IWL_HOST_INT_CALIB_TIMEOUT_DEF);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
iwl_set_pwr_vmain(trans);
- iwl_nic_config(priv(trans));
+ iwl_op_mode_nic_config(trans->op_mode);
+#ifndef CONFIG_IWLWIFI_IDI
/* Allocate the RX queue, or reset if it is already allocated */
iwl_rx_init(trans);
+#endif
/* Allocate or reset and init all Tx and Command queues */
if (iwl_tx_init(trans))
return -ENOMEM;
- if (hw_params(trans).shadow_reg_enable) {
+ if (cfg(trans)->base_params->shadow_reg_enable) {
/* enable shadow regs in HW */
- iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
0x800FFFFF);
}
- set_bit(STATUS_INIT, &trans->shrd->status);
-
return 0;
}
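
The new iwl_apm_config() chooses between L0S and L1 based on the PCIe Link Control value read by iwl_pciexp_link_ctrl(). A minimal sketch of just that decision, assuming the Link Control word has already been read from config space (bit values mirror the PCI_CFG_LINK_CTRL_VAL_* defines above; function names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define LNKCTL_L0S_EN 0x01 /* mirrors PCI_CFG_LINK_CTRL_VAL_L0S_EN */
#define LNKCTL_L1_EN  0x02 /* mirrors PCI_CFG_LINK_CTRL_VAL_L1_EN  */

/* If BIOS/OS already enabled L1-ASPM, skip L0S so the link goes L0->L1
 * directly; otherwise enable L0S so at least some power saving remains. */
bool want_l0s(uint16_t lctl)
{
	return !(lctl & LNKCTL_L1_EN);
}

/* pm_support is derived the same way, from the L0S enable bit. */
bool pm_support(uint16_t lctl)
{
	return !(lctl & LNKCTL_L0S_EN);
}
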
@@ -672,11 +852,11 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
{
int ret;
- iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
/* See if we got it */
- ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
HW_READY_TIMEOUT);
@@ -686,21 +866,22 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
}
/* Note: returns standard 0/-ERROR code */
-static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
+static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
ret = iwl_set_hw_ready(trans);
+ /* If the card is ready, exit 0 */
if (ret >= 0)
return 0;
/* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE);
- ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
@@ -767,13 +948,90 @@ static const u8 iwlagn_pan_ac_to_queue[] = {
7, 6, 5, 4,
};
-static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
+/*
+ * ucode
+ */
+static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+ const struct fw_desc *section)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ dma_addr_t phy_addr = section->p_addr;
+ u32 byte_cnt = section->len;
+ u32 dst_addr = section->offset;
+ int ret;
+
+ trans_pcie->ucode_write_complete = false;
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+ iwl_write_direct32(trans,
+ FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
+
+ iwl_write_direct32(trans,
+ FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+ iwl_write_direct32(trans,
+ FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+ IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+ section_num);
+ ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+ trans_pcie->ucode_write_complete, 5 * HZ);
+ if (!ret) {
+ IWL_ERR(trans, "Could not load the [%d] uCode section\n",
+ section_num);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+ if (!image->sec[i].p_addr)
+ break;
+
+ ret = iwl_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(trans, CSR_RESET, 0);
+
+ return 0;
+}
+
+static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw)
{
int ret;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill;
- trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
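
iwl_load_section() above programs the service DMA channel with one firmware section and then sleeps on ucode_write_waitq until the "write complete" interrupt fires or five seconds elapse. A generic kernel-style sketch of that wait/wake pattern (names are illustrative, not the driver's symbols):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(write_waitq);
static bool write_complete;

/* caller side: re-arm the flag, kick the DMA, then wait */
static int wait_for_fw_chunk(void)
{
	long ret;

	write_complete = false;	/* re-armed before the DMA is started */
	/* ... program the service channel here ... */
	ret = wait_event_timeout(write_waitq, write_complete, 5 * HZ);

	return ret ? 0 : -ETIMEDOUT; /* 0 from wait_event_timeout means timeout */
}

/* interrupt side: the DMA-done handler releases the waiter */
static void fw_chunk_done(void)
{
	write_complete = true;
	wake_up(&write_waitq);
}
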
@@ -783,26 +1041,23 @@ static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
- if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
- iwl_trans_pcie_prepare_card_hw(trans)) {
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
return -EIO;
}
/* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
- else
- set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- if (iwl_is_rfkill(trans->shrd)) {
- iwl_set_hw_rfkill_state(priv(trans), true);
- iwl_enable_interrupts(trans);
+ if (hw_rfkill) {
+ iwl_enable_rfkill_int(trans);
return -ERFKILL;
}
- iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
ret = iwl_nic_init(trans);
if (ret) {
@@ -811,31 +1066,37 @@ static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
}
/* make sure rfkill handshake bits are cleared */
- iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
/* clear (again), then enable host interrupts */
- iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
iwl_enable_interrupts(trans);
/* really make sure rfkill handshake bits are cleared */
- iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- return 0;
+ /* Load the given image to the HW */
+ return iwl_load_given_ucode(trans, fw);
}
/*
* Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->shrd->lock and mac access
+ * must be called under the irq lock and with MAC access
*/
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
- iwl_write_prph(bus(trans), SCD_TXFACT, mask);
+ struct iwl_trans_pcie __maybe_unused *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->irq_lock);
+
+ iwl_write_prph(trans, SCD_TXFACT, mask);
}
-static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
+static void iwl_tx_start(struct iwl_trans *trans)
{
const struct queue_to_fifo_ac *queue_to_fifo;
struct iwl_trans_pcie *trans_pcie =
@@ -845,49 +1106,50 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
int i, chan;
u32 reg_val;
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
trans_pcie->scd_base_addr =
- iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
+ iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
/* reset conext data memory */
for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
a += 4)
- iwl_write_targ_mem(bus(trans), a, 0);
+ iwl_write_targ_mem(trans, a, 0);
/* reset tx status memory */
for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
a += 4)
- iwl_write_targ_mem(bus(trans), a, 0);
+ iwl_write_targ_mem(trans, a, 0);
for (; a < trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
+ SCD_TRANS_TBL_OFFSET_QUEUE(
+ cfg(trans)->base_params->num_of_queues);
a += 4)
- iwl_write_targ_mem(bus(trans), a, 0);
+ iwl_write_targ_mem(trans, a, 0);
- iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
+ iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
/* Enable DMA channel */
for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
/* Update FH chicken bits */
- reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
+ reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
- iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
- SCD_QUEUECHAIN_SEL_ALL(trans));
- iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);
+ iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
+ SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
+ iwl_write_prph(trans, SCD_AGGR_SEL, 0);
/* initiate the queues */
- for (i = 0; i < hw_params(trans).max_txq_num; i++) {
- iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+ for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) {
+ iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
+ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(i) +
sizeof(u32),
((SCD_WIN_SIZE <<
@@ -898,8 +1160,8 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
}
- iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
- IWL_MASK(0, hw_params(trans).max_txq_num));
+ iwl_write_prph(trans, SCD_INTERRUPT_MASK,
+ IWL_MASK(0, cfg(trans)->base_params->num_of_queues));
/* Activate all Tx DMA/FIFO channels */
iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
@@ -910,7 +1172,7 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
else
queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
- iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
+ iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
/* make sure all queue are not stopped */
memset(&trans_pcie->queue_stopped[0], 0,
@@ -941,40 +1203,47 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
fifo, 0);
}
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* Enable L1-Active */
- iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
+{
+ iwl_reset_ict(trans);
+ iwl_tx_start(trans);
+}
+
/**
* iwlagn_txq_ctx_stop - Stop all Tx DMA channels
*/
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
- int ch, txq_id;
+ int ch, txq_id, ret;
unsigned long flags;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_trans_txq_set_sched(trans, 0);
/* Stop each Tx DMA channel, and wait for it to be idle */
for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
- iwl_write_direct32(bus(trans),
+ iwl_write_direct32(trans,
FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
+ ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000))
+ 1000);
+ if (ret < 0)
IWL_ERR(trans, "Failing on timeout while stopping"
" DMA channel %d [0x%08x]", ch,
- iwl_read_direct32(bus(trans),
+ iwl_read_direct32(trans,
FH_TSSR_TX_STATUS_REG));
}
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
if (!trans_pcie->txq) {
IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
@@ -982,7 +1251,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
}
/* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++)
iwl_tx_queue_unmap(trans, txq_id);
return 0;
@@ -994,9 +1264,9 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* tell the device to stop sending interrupts */
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* device going down, Stop using ICT table */
iwl_disable_ict(trans);
@@ -1008,36 +1278,50 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* restart. So don't process again if the device is
* already dead.
*/
- if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
iwl_trans_tx_stop(trans);
+#ifndef CONFIG_IWLWIFI_IDI
iwl_trans_rx_stop(trans);
-
+#endif
/* Power-down device's busmaster DMA clocks */
- iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
+ iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
}
/* Make sure (redundant) we've released our request to stay awake */
- iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
- iwl_apm_stop(priv(trans));
+ iwl_apm_stop(trans);
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clean again the interrupt here
*/
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* wait to make sure we flush pending tasklet*/
- synchronize_irq(bus(trans)->irq);
+ synchronize_irq(trans_pcie->irq);
tasklet_kill(&trans_pcie->irq_tasklet);
+ cancel_work_sync(&trans_pcie->rx_replenish);
+
/* stop and reset the on-board processor */
- iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+}
+
+static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
+{
+ /* let the ucode operate on its own */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ iwl_disable_interrupts(trans);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -1092,6 +1376,8 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
+ spin_lock(&txq->lock);
+
/* In AGG mode, the index in the ring must correspond to the WiFi
* sequence number. This is a HW requirements to help the SCD to parse
* the BA.
@@ -1134,11 +1420,11 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
- txcmd_phys = dma_map_single(bus(trans)->dev,
+ txcmd_phys = dma_map_single(trans->dev,
&dev_cmd->hdr, firstlen,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
- return -1;
+ if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
+ goto out_err;
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
dma_unmap_len_set(out_meta, len, firstlen);
@@ -1153,14 +1439,14 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* if any (802.11 null frames have no payload). */
secondlen = skb->len - hdr_len;
if (secondlen > 0) {
- phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
+ phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
secondlen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
- dma_unmap_single(bus(trans)->dev,
+ if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
+ dma_unmap_single(trans->dev,
dma_unmap_addr(out_meta, mapping),
dma_unmap_len(out_meta, len),
DMA_BIDIRECTIONAL);
- return -1;
+ goto out_err;
}
}
@@ -1174,7 +1460,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
offsetof(struct iwl_tx_cmd, scratch);
/* take back ownership of DMA buffer to enable update */
- dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
+ dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
@@ -1182,16 +1468,14 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
le16_to_cpu(dev_cmd->hdr.sequence));
IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
- dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
+ dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
- trace_iwlwifi_dev_tx(priv(trans),
+ trace_iwlwifi_dev_tx(trans->dev,
&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
sizeof(struct iwl_tfd),
&dev_cmd->hdr, firstlen,
@@ -1212,46 +1496,77 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->need_update = 1;
iwl_txq_update_write_ptr(trans, txq);
} else {
- iwl_stop_queue(trans, txq, "Queue is full");
+ iwl_stop_queue(trans, txq);
}
}
+ spin_unlock(&txq->lock);
return 0;
+ out_err:
+ spin_unlock(&txq->lock);
+ return -1;
}
-static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(bus(trans), CSR_RESET, 0);
-}
-
-static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
int err;
+ bool hw_rfkill;
trans_pcie->inta_mask = CSR_INI_SET_MASK;
- tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
- iwl_irq_tasklet, (unsigned long)trans);
+ if (!trans_pcie->irq_requested) {
+ tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
+ iwl_irq_tasklet, (unsigned long)trans);
- iwl_alloc_isr_ict(trans);
+ iwl_alloc_isr_ict(trans);
- err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
- DRV_NAME, trans);
+ err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
+ DRV_NAME, trans);
+ if (err) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n",
+ trans_pcie->irq);
+ goto error;
+ }
+
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
+ trans_pcie->irq_requested = true;
+ }
+
+ err = iwl_prepare_card_hw(trans);
if (err) {
- IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
- iwl_free_isr_ict(trans);
- return err;
+ IWL_ERR(trans, "Error while preparing HW: %d", err);
+ goto err_free_irq;
}
- INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
- return 0;
+ iwl_apm_init(trans);
+
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+ return err;
+
+err_free_irq:
+ free_irq(trans_pcie->irq, trans);
+error:
+ iwl_free_isr_ict(trans);
+ tasklet_kill(&trans_pcie->irq_tasklet);
+ return err;
+}
+
+static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
+{
+ iwl_apm_stop(trans);
+
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ /* Even if we stop the HW, we still want the RF kill interrupt */
+ iwl_enable_rfkill_int(trans);
}
static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
- int txq_id, int ssn, u32 status,
- struct sk_buff_head *skbs)
+ int txq_id, int ssn, struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -1259,6 +1574,8 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
int tfd_num = ssn & (txq->q.n_bd - 1);
int freed = 0;
+ spin_lock(&txq->lock);
+
txq->time_stamp = jiffies;
if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
@@ -1273,6 +1590,7 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
"agg_txq[sta_id[tid] %d", txq_id,
trans_pcie->agg_txq[sta_id][tid]);
+ spin_unlock(&txq->lock);
return 1;
}
@@ -1281,115 +1599,90 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
tfd_num, ssn);
freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
- (!txq->sched_retry ||
- status != TX_STATUS_FAIL_PASSIVE_NO_RX))
- iwl_wake_queue(trans, txq, "Packets reclaimed");
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ iwl_wake_queue(trans, txq);
}
+
+ spin_unlock(&txq->lock);
return 0;
}
-static void iwl_trans_pcie_free(struct iwl_trans *trans)
+static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
- iwl_calib_free_results(trans);
- iwl_trans_pcie_tx_free(trans);
- iwl_trans_pcie_rx_free(trans);
- free_irq(bus(trans)->irq, trans);
- iwl_free_isr_ict(trans);
- trans->shrd->trans = NULL;
- kfree(trans);
+ writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
-#ifdef CONFIG_PM_SLEEP
-static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
- /*
- * This function is called when system goes into suspend state
- * mac80211 will call iwlagn_mac_stop() from the mac80211 suspend
- * function first but since iwlagn_mac_stop() has no knowledge of
- * who the caller is,
- * it will not call apm_ops.stop() to stop the DMA operation.
- * Calling apm_ops.stop here to make sure we stop the DMA.
- *
- * But of course ... if we have configured WoWLAN then we did other
- * things already :-)
- */
- if (!trans->shrd->wowlan) {
- iwl_apm_stop(priv(trans));
- } else {
- iwl_disable_interrupts(trans);
- iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- }
-
- return 0;
+ writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
-static int iwl_trans_pcie_resume(struct iwl_trans *trans)
+static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
- bool hw_rfkill = false;
-
- iwl_enable_interrupts(trans);
+ return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
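
The write8/write32/read32 handlers above are thin wrappers around the BAR mapped into hw_base; a minimal sketch of the same idea, assuming the base came from pci_ioremap_bar() as in iwl_trans_pcie_alloc() at the end of this patch (names are illustrative):

#include <linux/io.h>
#include <linux/types.h>

/* illustrative: direct register access through an ioremap'd PCI BAR */
static void __iomem *hw_base;	/* e.g. pci_ioremap_bar(pdev, 0) */

static void reg_write32(u32 ofs, u32 val)
{
	writel(val, hw_base + ofs);
}

static u32 reg_read32(u32 ofs)
{
	return readl(hw_base + ofs);
}
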
- if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rfkill = true;
+static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (hw_rfkill)
- set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+ if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
+ trans_pcie->n_no_reclaim_cmds = 0;
else
- clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
-
- iwl_set_hw_rfkill_state(priv(trans), hw_rfkill);
-
- return 0;
+ trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
+ if (trans_pcie->n_no_reclaim_cmds)
+ memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
+ trans_pcie->n_no_reclaim_cmds * sizeof(u8));
}
-#endif /* CONFIG_PM_SLEEP */
-static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
- const char *msg)
+static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
- u8 ac, txq_id;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- for (ac = 0; ac < AC_NUM; ac++) {
- txq_id = trans_pcie->ac_to_queue[ctx][ac];
- IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
- ac,
- (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
- ? "stopped" : "awake");
- iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
+ iwl_trans_pcie_tx_free(trans);
+#ifndef CONFIG_IWLWIFI_IDI
+ iwl_trans_pcie_rx_free(trans);
+#endif
+ if (trans_pcie->irq_requested == true) {
+ free_irq(trans_pcie->irq, trans);
+ iwl_free_isr_ict(trans);
}
-}
-const struct iwl_trans_ops trans_ops_pcie;
+ pci_disable_msi(trans_pcie->pci_dev);
+ iounmap(trans_pcie->hw_base);
+ pci_release_regions(trans_pcie->pci_dev);
+ pci_disable_device(trans_pcie->pci_dev);
-static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
-{
- struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
- sizeof(struct iwl_trans_pcie),
- GFP_KERNEL);
- if (iwl_trans) {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
- iwl_trans->ops = &trans_ops_pcie;
- iwl_trans->shrd = shrd;
- trans_pcie->trans = iwl_trans;
- spin_lock_init(&iwl_trans->hcmd_lock);
- }
+ trans->shrd->trans = NULL;
+ kfree(trans);
+}
- return iwl_trans;
+#ifdef CONFIG_PM_SLEEP
+static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+{
+ return 0;
}
-static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
- const char *msg)
+static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill;
+
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+ if (hw_rfkill)
+ iwl_enable_rfkill_int(trans);
+ else
+ iwl_enable_interrupts(trans);
- iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+ return 0;
}
+#endif /* CONFIG_PM_SLEEP */
#define IWL_FLUSH_WAIT_MS 2000
@@ -1403,8 +1696,8 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
int ret = 0;
/* waiting for all the tx frames complete might take a while */
- for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
- if (cnt == trans->shrd->cmd_queue)
+ for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
+ if (cnt == trans_pcie->cmd_queue)
continue;
txq = &trans_pcie->txq[cnt];
q = &txq->q;
@@ -1446,9 +1739,9 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
- iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt))
& (TFD_QUEUE_SIZE_MAX - 1),
- iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
return 1;
}
@@ -1502,7 +1795,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
pos += scnprintf(*buf + pos, bufsz - pos,
" %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
- iwl_read_direct32(bus(trans), fh_tbl[i]));
+ iwl_read_direct32(trans, fh_tbl[i]));
}
return pos;
}
@@ -1511,7 +1804,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
IWL_ERR(trans, " %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
- iwl_read_direct32(bus(trans), fh_tbl[i]));
+ iwl_read_direct32(trans, fh_tbl[i]));
}
return 0;
}
@@ -1581,7 +1874,7 @@ void iwl_dump_csr(struct iwl_trans *trans)
for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
IWL_ERR(trans, " %25s: 0X%08x\n",
get_csr_string(csr_tbl[i]),
- iwl_read32(bus(trans), csr_tbl[i]));
+ iwl_read32(trans, csr_tbl[i]));
}
}
@@ -1649,7 +1942,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
int pos = 0;
int cnt;
int ret;
- const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
+ size_t bufsz;
+
+ bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues;
if (!trans_pcie->txq) {
IWL_ERR(trans, "txq not ready\n");
@@ -1659,7 +1954,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
if (!buf)
return -ENOMEM;
- for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
+ for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
txq = &trans_pcie->txq[cnt];
q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
@@ -1902,14 +2197,13 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
#endif /*CONFIG_IWLWIFI_DEBUGFS */
const struct iwl_trans_ops trans_ops_pcie = {
- .alloc = iwl_trans_pcie_alloc,
- .request_irq = iwl_trans_pcie_request_irq,
- .start_device = iwl_trans_pcie_start_device,
- .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
+ .start_hw = iwl_trans_pcie_start_hw,
+ .stop_hw = iwl_trans_pcie_stop_hw,
+ .fw_alive = iwl_trans_pcie_fw_alive,
+ .start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
- .tx_start = iwl_trans_pcie_tx_start,
- .wake_any_queue = iwl_trans_pcie_wake_any_queue,
+ .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
.send_cmd = iwl_trans_pcie_send_cmd,
@@ -1920,10 +2214,7 @@ const struct iwl_trans_ops trans_ops_pcie = {
.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
- .kick_nic = iwl_trans_pcie_kick_nic,
-
.free = iwl_trans_pcie_free,
- .stop_queue = iwl_trans_pcie_stop_queue,
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
@@ -1934,4 +2225,121 @@ const struct iwl_trans_ops trans_ops_pcie = {
.suspend = iwl_trans_pcie_suspend,
.resume = iwl_trans_pcie_resume,
#endif
+ .write8 = iwl_trans_pcie_write8,
+ .write32 = iwl_trans_pcie_write32,
+ .read32 = iwl_trans_pcie_read32,
+ .configure = iwl_trans_pcie_configure,
};
+
+struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
+ struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ u16 pci_cmd;
+ int err;
+
+ trans = kzalloc(sizeof(struct iwl_trans) +
+ sizeof(struct iwl_trans_pcie), GFP_KERNEL);
+
+ if (WARN_ON(!trans))
+ return NULL;
+
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans->ops = &trans_ops_pcie;
+ trans->shrd = shrd;
+ trans_pcie->trans = trans;
+ spin_lock_init(&trans_pcie->irq_lock);
+ init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+ /* W/A - seems to solve weird behavior. We need to remove this if we
+ * don't want to stay in L1 all the time. This wastes a lot of power */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_no_pci;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
+ goto out_pci_disable_device;
+ }
+
+ trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
+ if (!trans_pcie->hw_base) {
+ dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "pci_resource_len = 0x%08llx\n",
+ (unsigned long long) pci_resource_len(pdev, 0));
+ dev_printk(KERN_INFO, &pdev->dev,
+ "pci_resource_base = %p\n", trans_pcie->hw_base);
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "HW Revision ID = 0x%X\n", pdev->revision);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ err = pci_enable_msi(pdev);
+ if (err)
+ dev_printk(KERN_ERR, &pdev->dev,
+ "pci_enable_msi failed(0X%x)", err);
+
+ trans->dev = &pdev->dev;
+ trans_pcie->irq = pdev->irq;
+ trans_pcie->pci_dev = pdev;
+ trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+ trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
+ snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
+ "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
+
+ /* TODO: Move this away, not needed if not MSI */
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ }
+
+ /* Initialize the wait queue for commands */
+ init_waitqueue_head(&trans->wait_command_queue);
+
+ return trans;
+
+out_pci_release_regions:
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_disable_device(pdev);
+out_no_pci:
+ kfree(trans);
+ return NULL;
+}
+
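
iwl_trans_pcie_alloc() sets the DMA mask with the usual try-wide-then-narrow fallback: a 36-bit mask first, dropping to 32 bits if the platform rejects it. A sketch of just that step (helper name is illustrative; error reporting trimmed):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int set_dma_masks(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}
	return err; /* non-zero: no usable DMA mask, caller disables the device */
}
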