author    Alexander Duyck <alexander.h.duyck@intel.com>    2010-11-16 19:26:48 -0800
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>       2010-11-16 19:26:48 -0800
commit    d5f398ed73522b9f76861af6553775c5851de0d0 (patch)
tree      63bc695c221d15332c4ff9ec69f2a6e66c903563
parent    8ad494b0e59950e2b4e587c32cb67a2452795ea0 (diff)
ixgbe: cleanup ixgbe_alloc_rx_buffers
This change re-orders ixgbe_alloc_rx_buffers to make better use of the packet split enabled flag. The new setup requires less branching: instead of testing per-buffer state up front, we are down to fewer if statements because we are either handling packet split or we aren't.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
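For orientation before the diff itself, here is a condensed sketch of the per-descriptor flow the patch produces. It uses the driver's real helpers and field names, but elides the error unwinding and on-demand page mapping shown in the patch body below, so treat it as an outline rather than the verbatim code:

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
                bi = &rx_ring->rx_buffer_info[i];

                /* common setup: every mode needs an skb and its DMA mapping */
                if (!bi->skb)
                        bi->skb = netdev_alloc_skb_ip_align(adapter->netdev,
                                                            rx_ring->rx_buf_len);
                if (!bi->dma)
                        bi->dma = dma_map_single(&pdev->dev, bi->skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);

                /* one branch on the mode, rather than per-buffer-state tests */
                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        /* packet split: header lands in the skb, payload in a
                         * half page allocated and mapped on demand (see patch) */
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        /* single buffer: payload goes into the skb mapping */
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                        rx_desc->read.hdr_addr = 0;
                }

                if (++i == rx_ring->count)
                        i = 0;
        }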
-rw-r--r--  drivers/net/ixgbe/ixgbe.h       |  2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  | 81
2 files changed, 43 insertions, 40 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 93946b683ad..149cf26b254 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -472,7 +472,7 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
                                              struct ixgbe_tx_buffer *);
 extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *rx_ring,
-                                   int cleaned_count);
+                                   u16 cleaned_count);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 480f0b0f038..e838479d2d9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1010,63 +1010,70 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
  **/
 void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring,
-                            int cleaned_count)
+                            u16 cleaned_count)
 {
-        struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
         union ixgbe_adv_rx_desc *rx_desc;
         struct ixgbe_rx_buffer *bi;
-        unsigned int i;
-        unsigned int bufsz = rx_ring->rx_buf_len;
-
-        i = rx_ring->next_to_use;
-        bi = &rx_ring->rx_buffer_info[i];
+        struct sk_buff *skb;
+        u16 i = rx_ring->next_to_use;
 
         while (cleaned_count--) {
                 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+                bi = &rx_ring->rx_buffer_info[i];
+                skb = bi->skb;
 
-                if (!bi->page_dma &&
-                    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
-                        if (!bi->page) {
-                                bi->page = netdev_alloc_page(netdev);
-                                if (!bi->page) {
-                                        adapter->alloc_rx_page_failed++;
-                                        goto no_buffers;
-                                }
-                                bi->page_offset = 0;
-                        } else {
-                                /* use a half page if we're re-using */
-                                bi->page_offset ^= (PAGE_SIZE / 2);
-                        }
-
-                        bi->page_dma = dma_map_page(&pdev->dev, bi->page,
-                                                    bi->page_offset,
-                                                    (PAGE_SIZE / 2),
-                                                    DMA_FROM_DEVICE);
-                }
-
-                if (!bi->skb) {
-                        struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
-                                                                        bufsz);
-                        bi->skb = skb;
-
+                if (!skb) {
+                        skb = netdev_alloc_skb_ip_align(adapter->netdev,
+                                                        rx_ring->rx_buf_len);
                         if (!skb) {
                                 adapter->alloc_rx_buff_failed++;
                                 goto no_buffers;
                         }
                         /* initialize queue mapping */
                         skb_record_rx_queue(skb, rx_ring->queue_index);
+                        bi->skb = skb;
                 }
 
                 if (!bi->dma) {
                         bi->dma = dma_map_single(&pdev->dev,
-                                                 bi->skb->data,
+                                                 skb->data,
                                                  rx_ring->rx_buf_len,
                                                  DMA_FROM_DEVICE);
+                        if (dma_mapping_error(&pdev->dev, bi->dma)) {
+                                adapter->alloc_rx_buff_failed++;
+                                bi->dma = 0;
+                                goto no_buffers;
+                        }
                 }
-                /* Refresh the desc even if buffer_addrs didn't change because
-                 * each write-back erases this info. */
+
                 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+                        if (!bi->page) {
+                                bi->page = netdev_alloc_page(adapter->netdev);
+                                if (!bi->page) {
+                                        adapter->alloc_rx_page_failed++;
+                                        goto no_buffers;
+                                }
+                        }
+
+                        if (!bi->page_dma) {
+                                /* use a half page if we're re-using */
+                                bi->page_offset ^= PAGE_SIZE / 2;
+                                bi->page_dma = dma_map_page(&pdev->dev,
+                                                            bi->page,
+                                                            bi->page_offset,
+                                                            PAGE_SIZE / 2,
+                                                            DMA_FROM_DEVICE);
+                                if (dma_mapping_error(&pdev->dev,
+                                                      bi->page_dma)) {
+                                        adapter->alloc_rx_page_failed++;
+                                        bi->page_dma = 0;
+                                        goto no_buffers;
+                                }
+                        }
+
+                        /* Refresh the desc even if buffer_addrs didn't change
+                         * because each write-back erases this info. */
                         rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                         rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                 } else {
@@ -1077,15 +1084,11 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                 i++;
                 if (i == rx_ring->count)
                         i = 0;
-                bi = &rx_ring->rx_buffer_info[i];
         }
 
 no_buffers:
         if (rx_ring->next_to_use != i) {
                 rx_ring->next_to_use = i;
-                if (i-- == 0)
-                        i = (rx_ring->count - 1);
-
                 ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
         }
 }
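A note on the simplified no_buffers path: the old code backed i up by one before writing the tail, while the new code hands ixgbe_release_rx_desc the updated next_to_use directly, since the RDT tail register points at the next descriptor software will fill. The helper itself is not touched by this patch; a rough reconstruction from the same driver era (an assumption, shown only for context, not part of this change) looks like:

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
        /* Force descriptor writes to memory to complete before telling
         * hardware there are new descriptors to fetch; this matters on
         * weakly ordered architectures. */
        wmb();
        IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}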