Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_tx.c')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/en_tx.c | 40
1 file changed, 18 insertions, 22 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 13457032d15..dd1f6d34645 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,16 +44,6 @@
 
 #include "mlx4_en.h"
 
-enum {
-	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
-	MAX_BF = 256,
-};
-
-static int inline_thold __read_mostly = MAX_INLINE;
-
-module_param_named(inline_thold, inline_thold, int, 0444);
-MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
-
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_tx_ring **pring, int qpn, u32 size,
 			   u16 stride, int node, int queue_index)
@@ -75,8 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	ring->size = size;
 	ring->size_mask = size - 1;
 	ring->stride = stride;
-
-	inline_thold = min(inline_thold, MAX_INLINE);
+	ring->inline_thold = priv->prof->inline_thold;
 
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = vmalloc_node(tmp, node);
@@ -325,7 +314,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
 			}
 		}
 	}
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return tx_info->nr_txbb;
 }
 
@@ -456,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
 	 */
 	if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
 		netif_tx_wake_queue(ring->tx_queue);
-		priv->port_stats.wake_queue++;
+		ring->wake_queue++;
 	}
 	return done;
 }
@@ -520,7 +509,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 	return ring->buf + index * TXBB_SIZE;
 }
 
-static int is_inline(struct sk_buff *skb, void **pfrag)
+static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag)
 {
 	void *ptr;
 
@@ -580,7 +569,7 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 		}
 	} else {
 		*lso_header_size = 0;
-		if (!is_inline(skb, NULL))
+		if (!is_inline(priv->prof->inline_thold, skb, NULL))
 			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
 		else
 			real_size = inline_size(skb);
@@ -596,7 +585,13 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
 
 	if (skb->len <= spc) {
-		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+		if (likely(skb->len >= MIN_PKT_LEN)) {
+			inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+		} else {
+			inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
+			memset(((void *)(inl + 1)) + skb->len, 0,
+			       MIN_PKT_LEN - skb->len);
+		}
 		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
 		if (skb_shinfo(skb)->nr_frags)
 			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
@@ -696,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 		/* every full Tx ring stops queue */
 		netif_tx_stop_queue(ring->tx_queue);
-		priv->port_stats.queue_stopped++;
+		ring->queue_stopped++;
 
 		/* If queue was emptied after the if, and before the
 		 * stop_queue - need to wake the queue, or else it will remain
@@ -709,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (unlikely(((int)(ring->prod - ring->cons)) <=
 			     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 			netif_tx_wake_queue(ring->tx_queue);
-			priv->port_stats.wake_queue++;
+			ring->wake_queue++;
 		} else {
 			return NETDEV_TX_BUSY;
 		}
@@ -747,11 +742,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_info->data_offset = (void *)data - (void *)tx_desc;
 
 	tx_info->linear = (lso_header_size < skb_headlen(skb) &&
-			   !is_inline(skb, NULL)) ? 1 : 0;
+			   !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0;
 
 	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
 
-	if (is_inline(skb, &fragptr)) {
+	if (is_inline(ring->inline_thold, skb, &fragptr)) {
 		tx_info->inl = 1;
 	} else {
 		/* Map fragments */
@@ -881,7 +876,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_tx_timestamp(skb);
 
 	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
-		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
+		tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn);
+		op_own |= htonl((bf_index & 0xffff) << 8);
 		/* Ensure new descirptor hits memory
 		 * before setting ownership of this descriptor to HW */
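Note on the build_inline_wqe() hunk: the new branch makes sure a frame shorter than MIN_PKT_LEN is still reported to the hardware as MIN_PKT_LEN bytes, with the missing tail zeroed before the inline copy. The standalone C sketch below reproduces only that padding decision in userspace; the struct layout, the 17-byte value for MIN_PKT_LEN and the host-order byte_count field are assumptions made for illustration (the driver writes the field with cpu_to_be32 and operates on an skb inside the descriptor).

/* Userspace sketch only -- not driver code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIN_PKT_LEN 17	/* assumed minimum payload length expected by the HW */

struct inline_seg {
	uint32_t byte_count;	/* bit 31 = inline flag, low bits = length */
	uint8_t data[64];	/* inline payload area following the header */
};

static void fill_inline_seg(struct inline_seg *inl, const uint8_t *pkt,
			    uint32_t len)
{
	if (len >= MIN_PKT_LEN) {
		inl->byte_count = (1u << 31) | len;
	} else {
		/* advertise MIN_PKT_LEN and zero the padding bytes after the payload */
		inl->byte_count = (1u << 31) | MIN_PKT_LEN;
		memset(inl->data + len, 0, MIN_PKT_LEN - len);
	}
	memcpy(inl->data, pkt, len);
}

int main(void)
{
	struct inline_seg seg;
	const uint8_t tiny[10] = "short-pkt";	/* 10 bytes, below MIN_PKT_LEN */

	fill_inline_seg(&seg, tiny, sizeof(tiny));
	printf("advertised length: %u\n",
	       (unsigned)(seg.byte_count & 0x7fffffffu));	/* prints 17 */
	return 0;
}

In the patch itself this check sits inside the skb->len <= spc branch, so only descriptors whose data is fully inlined take the padding path.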