| author | Wey-Yi Guy <wey-yi.w.guy@intel.com> | 2010-03-17 13:34:35 -0700 |
|---|---|---|
| committer | Reinette Chatre <reinette.chatre@intel.com> | 2010-03-25 11:19:33 -0700 |
| commit | 54b81550dd674466fe7d01629d2aab015c545a1e (patch) | |
| tree | ed589d93ecef142f95379170e35e3224fd580fb4 /drivers/net/wireless/iwlwifi/iwl-agn-lib.c | |
| parent | 74bcdb33e99f49ef5202dd2f8109945b4570edc2 (diff) | |
iwlwifi: move agn only rx functions from iwlcore to iwlagn
Identify the rx functions used only by the agn driver and move them from
iwlcore to iwlagn.
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-lib.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 200 |
1 file changed, 199 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 6f9d52d0446..3117382cfd4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -491,7 +491,7 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
         } else
                 iwlagn_rx_queue_reset(priv, rxq);
 
-        iwl_rx_replenish(priv);
+        iwlagn_rx_replenish(priv);
 
         iwlagn_rx_init(priv, rxq);
 
@@ -511,3 +511,201 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
 
         return 0;
 }
+
+/**
+ * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
+                                             dma_addr_t dma_addr)
+{
+        return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/**
+ * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void iwlagn_rx_queue_restock(struct iwl_priv *priv)
+{
+        struct iwl_rx_queue *rxq = &priv->rxq;
+        struct list_head *element;
+        struct iwl_rx_mem_buffer *rxb;
+        unsigned long flags;
+        int write;
+
+        spin_lock_irqsave(&rxq->lock, flags);
+        write = rxq->write & ~0x7;
+        while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+                /* Get next free Rx buffer, remove from free list */
+                element = rxq->rx_free.next;
+                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+                list_del(element);
+
+                /* Point to Rx buffer via next RBD in circular buffer */
+                rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
+                                                              rxb->page_dma);
+                rxq->queue[rxq->write] = rxb;
+                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+                rxq->free_count--;
+        }
+        spin_unlock_irqrestore(&rxq->lock, flags);
+        /* If the pre-allocated buffer pool is dropping low, schedule to
+         * refill it */
+        if (rxq->free_count <= RX_LOW_WATERMARK)
+                queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+        /* If we've added more space for the firmware to place data, tell it.
+         * Increment device's write pointer in multiples of 8. */
+        if (rxq->write_actual != (rxq->write & ~0x7)) {
+                spin_lock_irqsave(&rxq->lock, flags);
+                rxq->need_update = 1;
+                spin_unlock_irqrestore(&rxq->lock, flags);
+                iwl_rx_queue_update_write_ptr(priv, rxq);
+        }
+}
+
+/**
+ * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+        struct iwl_rx_queue *rxq = &priv->rxq;
+        struct list_head *element;
+        struct iwl_rx_mem_buffer *rxb;
+        struct page *page;
+        unsigned long flags;
+        gfp_t gfp_mask = priority;
+
+        while (1) {
+                spin_lock_irqsave(&rxq->lock, flags);
+                if (list_empty(&rxq->rx_used)) {
+                        spin_unlock_irqrestore(&rxq->lock, flags);
+                        return;
+                }
+                spin_unlock_irqrestore(&rxq->lock, flags);
+
+                if (rxq->free_count > RX_LOW_WATERMARK)
+                        gfp_mask |= __GFP_NOWARN;
+
+                if (priv->hw_params.rx_page_order > 0)
+                        gfp_mask |= __GFP_COMP;
+
+                /* Alloc a new receive buffer */
+                page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+                if (!page) {
+                        if (net_ratelimit())
+                                IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+                                               "order: %d\n",
+                                               priv->hw_params.rx_page_order);
+
+                        if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+                            net_ratelimit())
+                                IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
+                                         priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+                                         rxq->free_count);
+                        /* We don't reschedule replenish work here -- we will
+                         * call the restock method and if it still needs
+                         * more buffers it will schedule replenish */
+                        return;
+                }
+
+                spin_lock_irqsave(&rxq->lock, flags);
+
+                if (list_empty(&rxq->rx_used)) {
+                        spin_unlock_irqrestore(&rxq->lock, flags);
+                        __free_pages(page, priv->hw_params.rx_page_order);
+                        return;
+                }
+                element = rxq->rx_used.next;
+                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+                list_del(element);
+
+                spin_unlock_irqrestore(&rxq->lock, flags);
+
+                rxb->page = page;
+                /* Get physical address of the RB */
+                rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+                                PAGE_SIZE << priv->hw_params.rx_page_order,
+                                PCI_DMA_FROMDEVICE);
+                /* dma address must be no more than 36 bits */
+                BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+                /* and also 256 byte aligned! */
+                BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+                spin_lock_irqsave(&rxq->lock, flags);
+
+                list_add_tail(&rxb->list, &rxq->rx_free);
+                rxq->free_count++;
+                priv->alloc_rxb_page++;
+
+                spin_unlock_irqrestore(&rxq->lock, flags);
+        }
+}
+
+void iwlagn_rx_replenish(struct iwl_priv *priv)
+{
+        unsigned long flags;
+
+        iwlagn_rx_allocate(priv, GFP_KERNEL);
+
+        spin_lock_irqsave(&priv->lock, flags);
+        iwlagn_rx_queue_restock(priv);
+        spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void iwlagn_rx_replenish_now(struct iwl_priv *priv)
+{
+        iwlagn_rx_allocate(priv, GFP_ATOMIC);
+
+        iwlagn_rx_queue_restock(priv);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL
+ * This free routine walks the list of POOL entries and if SKB is set to
+ * non NULL it is unmapped and freed
+ */
+void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+        int i;
+        for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+                if (rxq->pool[i].page != NULL) {
+                        pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+                                PAGE_SIZE << priv->hw_params.rx_page_order,
+                                PCI_DMA_FROMDEVICE);
+                        __iwl_free_pages(priv, rxq->pool[i].page);
+                        rxq->pool[i].page = NULL;
+                }
+        }
+
+        dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                          rxq->dma_addr);
+        dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+                          rxq->rb_stts, rxq->rb_stts_dma);
+        rxq->bd = NULL;
+        rxq->rb_stts = NULL;
+}
+
+int iwlagn_rxq_stop(struct iwl_priv *priv)
+{
+
+        /* stop Rx DMA */
+        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+        iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+                            FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+        return 0;
+}
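
Side note (not part of the patch): the new iwlagn_dma_addr2rbd_ptr() helper packs bits [35:8] of a 256-byte-aligned, at-most-36-bit DMA address into a little-endian 32-bit word, which is exactly what the two BUG_ON() checks in iwlagn_rx_allocate() enforce before the address is written into the RBD. The standalone sketch below illustrates that format outside the kernel; the sample address is made up, and assert() stands in for the driver's BUG_ON()/cpu_to_le32().

```c
/* Illustrative userspace sketch of the RBD pointer format used in the patch.
 * The sample DMA address is hypothetical; the driver itself uses
 * cpu_to_le32() and BUG_ON() rather than plain integers and assert(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t dma_addr2rbd_ptr(uint64_t dma_addr)
{
        /* must fit in 36 bits ... */
        assert((dma_addr & ~((1ULL << 36) - 1)) == 0);
        /* ... and be 256-byte aligned, so the low 8 bits carry no information */
        assert((dma_addr & 0xff) == 0);
        return (uint32_t)(dma_addr >> 8);       /* bits [35:8] fit in 32 bits */
}

int main(void)
{
        uint64_t dma_addr = 0x123456700ULL;     /* hypothetical page DMA address */
        printf("RBD pointer word: 0x%08x\n", dma_addr2rbd_ptr(dma_addr));
        return 0;
}
```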