Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4')
 drivers/net/ethernet/mellanox/mlx4/cmd.c              |  39
 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c       |  48
 drivers/net/ethernet/mellanox/mlx4/en_main.c          |  17
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c        |   1
 drivers/net/ethernet/mellanox/mlx4/en_port.c          |  17
 drivers/net/ethernet/mellanox/mlx4/en_rx.c            |  29
 drivers/net/ethernet/mellanox/mlx4/en_tx.c            | 400
 drivers/net/ethernet/mellanox/mlx4/eq.c               |  30
 drivers/net/ethernet/mellanox/mlx4/fw.c               |  47
 drivers/net/ethernet/mellanox/mlx4/fw.h               |   2
 drivers/net/ethernet/mellanox/mlx4/main.c             | 486
 drivers/net/ethernet/mellanox/mlx4/mlx4.h             |   3
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h          | 101
 drivers/net/ethernet/mellanox/mlx4/mr.c               |  33
 drivers/net/ethernet/mellanox/mlx4/port.c             |  11
 drivers/net/ethernet/mellanox/mlx4/qp.c               |  12
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |  38
17 files changed, 847 insertions, 467 deletions
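
The largest en_tx.c change below is deferring the hardware doorbell while the stack signals that more packets are on the way (skb->xmit_more), which is also why the new xmit_more counter appears in the ethtool stats. A minimal sketch of that batching pattern, not the driver's exact code — the toy_* names and struct are illustrative placeholders:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative ring; only the fields this sketch needs. */
struct toy_ring {
	struct netdev_queue *txq;
	unsigned long xmit_more;	/* doorbells saved, as counted below */
};

static void toy_post_descriptor(struct toy_ring *ring, struct sk_buff *skb) { }
static void toy_write_doorbell(struct toy_ring *ring) { }

/* Skip the expensive MMIO doorbell while skb->xmit_more promises more
 * frames, unless the queue just stopped and the last frame must be flushed.
 */
static netdev_tx_t toy_xmit(struct sk_buff *skb, struct toy_ring *ring)
{
	bool send_doorbell;

	toy_post_descriptor(ring, skb);
	send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->txq);
	if (send_doorbell)
		toy_write_doorbell(ring);	/* uncached write to the NIC */
	else
		ring->xmit_more++;		/* doorbell batched away */
	return NETDEV_TX_OK;
}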
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 65a4a0f88ea..b16e1b95566 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -580,8 +580,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, err = context->result; if (err) { - mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", - op, context->fw_status); + /* Since we do not want to have this error message always + * displayed at driver start when there are ConnectX2 HCAs + * on the host, we deprecate the error message for this + * specific command/input_mod/opcode_mod/fw-status to be debug. + */ + if (op == MLX4_CMD_SET_PORT && in_modifier == 1 && + op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE) + mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", + op, context->fw_status); + else + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + op, context->fw_status); goto out; } @@ -1695,7 +1705,7 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) if (err) { vp_oper->vlan_idx = NO_INDX; mlx4_warn(&priv->dev, - "No vlan resorces slave %d, port %d\n", + "No vlan resources slave %d, port %d\n", slave, port); return err; } @@ -1711,7 +1721,7 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) err = vp_oper->mac_idx; vp_oper->mac_idx = NO_INDX; mlx4_warn(&priv->dev, - "No mac resorces slave %d, port %d\n", + "No mac resources slave %d, port %d\n", slave, port); return err; } @@ -2389,6 +2399,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( } EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); +static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + 1; + int max_port = min_port + + bitmap_weight(actv_ports.ports, dev->caps.num_ports); + + if (port < min_port) + port = min_port; + else if (port >= max_port) + port = max_port - 1; + + return port; +} + int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2402,6 +2428,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->mac = mac; mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", @@ -2428,6 +2455,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; if ((0 == vlan) && (0 == qos)) @@ -2455,6 +2483,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, struct mlx4_priv *priv; priv = mlx4_priv(dev); + port = mlx4_slaves_closest_port(dev, slave, port); vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if (MLX4_VGT != vp_oper->state.default_vlan) { @@ -2482,6 +2511,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->spoofchk = setting; @@ -2535,6 +2565,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat if 
(slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); switch (link_state) { case IFLA_VF_LINK_STATE_AUTO: /* get current link state */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index e22f24f784f..ae83da9cd18 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -112,6 +112,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = { /* port statistics */ "tso_packets", + "xmit_more", "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", "rx_csum_good", "rx_csum_none", "tx_chksum_offload", @@ -487,6 +488,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, struct mlx4_en_dev *mdev = priv->mdev; int err; + if (pause->autoneg) + return -EINVAL; + priv->prof->tx_pause = pause->tx_pause != 0; priv->prof->rx_pause = pause->rx_pause != 0; err = mlx4_SET_PORT_general(mdev->dev, priv->port, @@ -1263,6 +1267,48 @@ static u32 mlx4_en_get_priv_flags(struct net_device *dev) return priv->pflags; } +static int mlx4_en_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + void *data) +{ + const struct mlx4_en_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + *(u32 *)data = priv->prof->inline_thold; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int mlx4_en_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int val, ret = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + val = *(u32 *)data; + if (val < MIN_PKT_LEN || val > MAX_INLINE) + ret = -EINVAL; + else + priv->prof->inline_thold = val; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, @@ -1293,6 +1339,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .get_ts_info = mlx4_en_get_ts_info, .set_priv_flags = mlx4_en_set_priv_flags, .get_priv_flags = mlx4_en_get_priv_flags, + .get_tunable = mlx4_en_get_tunable, + .set_tunable = mlx4_en_set_tunable, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 3626fdf4cb5..2091ae88615 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -78,27 +78,24 @@ MLX4_EN_PARM_INT(inline_thold, MAX_INLINE, #define MAX_PFC_TX 0xff #define MAX_PFC_RX 0xff -int en_print(const char *level, const struct mlx4_en_priv *priv, - const char *format, ...) +void en_print(const char *level, const struct mlx4_en_priv *priv, + const char *format, ...) 
{ va_list args; struct va_format vaf; - int i; va_start(args, format); vaf.fmt = format; vaf.va = &args; if (priv->registered) - i = printk("%s%s: %s: %pV", - level, DRV_NAME, priv->dev->name, &vaf); + printk("%s%s: %s: %pV", + level, DRV_NAME, priv->dev->name, &vaf); else - i = printk("%s%s: %s: Port %d: %pV", - level, DRV_NAME, dev_name(&priv->mdev->pdev->dev), - priv->port, &vaf); + printk("%s%s: %s: Port %d: %pV", + level, DRV_NAME, dev_name(&priv->mdev->pdev->dev), + priv->port, &vaf); va_end(args); - - return i; } void mlx4_en_update_loopback_state(struct net_device *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index abddcf8c40a..f3032fec8fc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2459,6 +2459,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } priv->rx_ring_num = prof->rx_ring_num; priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; + priv->cqe_size = mdev->dev->caps.cqe_size; priv->mac_index = -1; priv->msg_enable = MLX4_EN_MSG_LEVEL; spin_lock_init(&priv->stats_lock); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index c2cfb05e729..0a0261d128b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -150,14 +150,19 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.tx_chksum_offload = 0; priv->port_stats.queue_stopped = 0; priv->port_stats.wake_queue = 0; + priv->port_stats.tso_packets = 0; + priv->port_stats.xmit_more = 0; for (i = 0; i < priv->tx_ring_num; i++) { - stats->tx_packets += priv->tx_ring[i]->packets; - stats->tx_bytes += priv->tx_ring[i]->bytes; - priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum; - priv->port_stats.queue_stopped += - priv->tx_ring[i]->queue_stopped; - priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue; + const struct mlx4_en_tx_ring *ring = priv->tx_ring[i]; + + stats->tx_packets += ring->packets; + stats->tx_bytes += ring->bytes; + priv->port_stats.tx_chksum_offload += ring->tx_csum; + priv->port_stats.queue_stopped += ring->queue_stopped; + priv->port_stats.wake_queue += ring->wake_queue; + priv->port_stats.tso_packets += ring->tso_packets; + priv->port_stats.xmit_more += ring->xmit_more; } stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9c909d23f14..01660c595f5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -76,10 +76,10 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, page_alloc->dma = dma; page_alloc->page_offset = frag_info->frag_align; /* Not doing get_page() for each frag is a big win - * on asymetric workloads. + * on asymetric workloads. Note we can not use atomic_set(). 
*/ - atomic_set(&page->_count, - page_alloc->page_size / frag_info->frag_stride); + atomic_add(page_alloc->page_size / frag_info->frag_stride - 1, + &page->_count); return 0; } @@ -588,6 +588,8 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, skb_copy_to_linear_data(skb, va, length); skb->tail += length; } else { + unsigned int pull_len; + /* Move relevant fragments to skb */ used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags, skb, length); @@ -597,16 +599,17 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, } skb_shinfo(skb)->nr_frags = used_frags; + pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE); /* Copy headers into the skb linear buffer */ - memcpy(skb->data, va, HEADER_COPY_SIZE); - skb->tail += HEADER_COPY_SIZE; + memcpy(skb->data, va, pull_len); + skb->tail += pull_len; /* Skip headers in first fragment */ - skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE; + skb_shinfo(skb)->frags[0].page_offset += pull_len; /* Adjust size of first fragment */ - skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE); - skb->data_len = length - HEADER_COPY_SIZE; + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len); + skb->data_len = length - pull_len; } return skb; } @@ -668,7 +671,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * descriptor offset can be deduced from the CQE index instead of * reading 'cqe->index' */ index = cq->mcq.cons_index & ring->size_mask; - cqe = &cq->buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, @@ -769,7 +772,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud gro_skb->ip_summed = CHECKSUM_UNNECESSARY; if (l2_tunnel) - gro_skb->encapsulation = 1; + gro_skb->csum_level = 1; if ((cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { @@ -823,8 +826,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud skb->protocol = eth_type_trans(skb, dev); skb_record_rx_queue(skb, cq->ring); - if (l2_tunnel) - skb->encapsulation = 1; + if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) + skb->csum_level = 1; if (dev->features & NETIF_F_RXHASH) skb_set_hash(skb, @@ -855,7 +858,7 @@ next: ++cq->mcq.cons_index; index = (cq->mcq.cons_index) & ring->size_mask; - cqe = &cq->buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; if (++polled == budget) goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index dae3da6d8dd..34c13787854 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -37,6 +37,7 @@ #include <linux/mlx4/qp.h> #include <linux/skbuff.h> #include <linux/if_vlan.h> +#include <linux/prefetch.h> #include <linux/vmalloc.h> #include <linux/tcp.h> #include <linux/ip.h> @@ -65,10 +66,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->size = size; ring->size_mask = size - 1; ring->stride = stride; - ring->inline_thold = priv->prof->inline_thold; tmp = size * sizeof(struct mlx4_en_tx_info); - ring->tx_info = vmalloc_node(tmp, node); + ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node); if (!ring->tx_info) { ring->tx_info = vmalloc(tmp); if (!ring->tx_info) { @@ -151,7 +151,7 @@ err_bounce: kfree(ring->bounce_buf); ring->bounce_buf = NULL; 
err_info: - vfree(ring->tx_info); + kvfree(ring->tx_info); ring->tx_info = NULL; err_ring: kfree(ring); @@ -174,7 +174,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); kfree(ring->bounce_buf); ring->bounce_buf = NULL; - vfree(ring->tx_info); + kvfree(ring->tx_info); ring->tx_info = NULL; kfree(ring); *pring = NULL; @@ -191,12 +191,12 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->prod = 0; ring->cons = 0xffffffff; ring->last_nr_txbb = 1; - ring->poll_cnt = 0; memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); memset(ring->buf, 0, ring->buf_size); ring->qp_state = MLX4_QP_STATE_RST; - ring->doorbell_qpn = ring->qp.qpn << 8; + ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); + ring->mr_key = cpu_to_be32(mdev->mr.key); mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, ring->cqn, user_prio, &ring->context); @@ -259,38 +259,45 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, int index, u8 owner, u64 timestamp) { - struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; - struct sk_buff *skb = tx_info->skb; - struct skb_frag_struct *frag; void *end = ring->buf + ring->buf_size; - int frags = skb_shinfo(skb)->nr_frags; + struct sk_buff *skb = tx_info->skb; + int nr_maps = tx_info->nr_maps; int i; - struct skb_shared_hwtstamps hwts; - if (timestamp) { - mlx4_en_fill_hwtstamps(mdev, &hwts, timestamp); + /* We do not touch skb here, so prefetch skb->users location + * to speedup consume_skb() + */ + prefetchw(&skb->users); + + if (unlikely(timestamp)) { + struct skb_shared_hwtstamps hwts; + + mlx4_en_fill_hwtstamps(priv->mdev, &hwts, timestamp); skb_tstamp_tx(skb, &hwts); } /* Optimize the common case when there are no wraparounds */ if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { if (!tx_info->inl) { - if (tx_info->linear) { + if (tx_info->linear) dma_unmap_single(priv->ddev, - (dma_addr_t) be64_to_cpu(data->addr), - be32_to_cpu(data->byte_count), - PCI_DMA_TODEVICE); - ++data; - } - - for (i = 0; i < frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + else dma_unmap_page(priv->ddev, - (dma_addr_t) be64_to_cpu(data[i].addr), - skb_frag_size(frag), PCI_DMA_TODEVICE); + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + for (i = 1; i < nr_maps; i++) { + data++; + dma_unmap_page(priv->ddev, + (dma_addr_t)be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); } } } else { @@ -299,27 +306,29 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, data = ring->buf + ((void *)data - end); } - if (tx_info->linear) { + if (tx_info->linear) dma_unmap_single(priv->ddev, - (dma_addr_t) be64_to_cpu(data->addr), - be32_to_cpu(data->byte_count), - PCI_DMA_TODEVICE); - ++data; - } - - for (i = 0; i < frags; i++) { + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + else + dma_unmap_page(priv->ddev, + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + for (i = 1; i < nr_maps; i++) { + data++; /* Check for wraparound before unmapping */ if ((void *) data >= end) data = ring->buf; - frag = &skb_shinfo(skb)->frags[i]; dma_unmap_page(priv->ddev, - (dma_addr_t) be64_to_cpu(data->addr), - 
skb_frag_size(frag), PCI_DMA_TODEVICE); - ++data; + (dma_addr_t)be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); } } } - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); return tx_info->nr_txbb; } @@ -377,13 +386,19 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, u64 timestamp = 0; int done = 0; int budget = priv->tx_work_limit; + u32 last_nr_txbb; + u32 ring_cons; if (!priv->port_up) return true; + netdev_txq_bql_complete_prefetchw(ring->tx_queue); + index = cons_index & size_mask; - cqe = &buf[(index << factor) + factor]; - ring_index = ring->cons & size_mask; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; + last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb); + ring_cons = ACCESS_ONCE(ring->cons); + ring_index = ring_cons & size_mask; stamp_index = ring_index; /* Process all completed CQEs */ @@ -408,19 +423,19 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, new_index = be16_to_cpu(cqe->wqe_index) & size_mask; do { - txbbs_skipped += ring->last_nr_txbb; - ring_index = (ring_index + ring->last_nr_txbb) & size_mask; + txbbs_skipped += last_nr_txbb; + ring_index = (ring_index + last_nr_txbb) & size_mask; if (ring->tx_info[ring_index].ts_requested) timestamp = mlx4_en_get_cqe_ts(cqe); /* free next descriptor */ - ring->last_nr_txbb = mlx4_en_free_tx_desc( + last_nr_txbb = mlx4_en_free_tx_desc( priv, ring, ring_index, - !!((ring->cons + txbbs_skipped) & + !!((ring_cons + txbbs_skipped) & ring->size), timestamp); mlx4_en_stamp_wqe(priv, ring, stamp_index, - !!((ring->cons + txbbs_stamp) & + !!((ring_cons + txbbs_stamp) & ring->size)); stamp_index = ring_index; txbbs_stamp = txbbs_skipped; @@ -430,7 +445,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, ++cons_index; index = cons_index & size_mask; - cqe = &buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; } @@ -441,7 +456,11 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, mcq->cons_index = cons_index; mlx4_cq_set_ci(mcq); wmb(); - ring->cons += txbbs_skipped; + + /* we want to dirty this cache line once */ + ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; + ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; + netdev_tx_completed_queue(ring->tx_queue, packets, bytes); /* @@ -512,30 +531,35 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, return ring->buf + index * TXBB_SIZE; } -static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag) +/* Decide if skb can be inlined in tx descriptor to avoid dma mapping + * + * It seems strange we do not simply use skb_copy_bits(). 
+ * This would allow to inline all skbs iff skb->len <= inline_thold + * + * Note that caller already checked skb was not a gso packet + */ +static bool is_inline(int inline_thold, const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + void **pfrag) { void *ptr; - if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) { - if (skb_shinfo(skb)->nr_frags == 1) { - ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]); - if (unlikely(!ptr)) - return 0; - - if (pfrag) - *pfrag = ptr; + if (skb->len > inline_thold || !inline_thold) + return false; - return 1; - } else if (unlikely(skb_shinfo(skb)->nr_frags)) - return 0; - else - return 1; + if (shinfo->nr_frags == 1) { + ptr = skb_frag_address_safe(&shinfo->frags[0]); + if (unlikely(!ptr)) + return false; + *pfrag = ptr; + return true; } - - return 0; + if (shinfo->nr_frags) + return false; + return true; } -static int inline_size(struct sk_buff *skb) +static int inline_size(const struct sk_buff *skb) { if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg) <= MLX4_INLINE_ALIGN) @@ -546,18 +570,23 @@ static int inline_size(struct sk_buff *skb) sizeof(struct mlx4_wqe_inline_seg), 16); } -static int get_real_size(struct sk_buff *skb, struct net_device *dev, - int *lso_header_size) +static int get_real_size(const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + struct net_device *dev, + int *lso_header_size, + bool *inline_ok, + void **pfrag) { struct mlx4_en_priv *priv = netdev_priv(dev); int real_size; - if (skb_is_gso(skb)) { + if (shinfo->gso_size) { + *inline_ok = false; if (skb->encapsulation) *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb); else *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); - real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE + + real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE + ALIGN(*lso_header_size + 4, DS_SIZE); if (unlikely(*lso_header_size != skb_headlen(skb))) { /* We add a segment for the skb linear buffer only if @@ -572,20 +601,28 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev, } } else { *lso_header_size = 0; - if (!is_inline(priv->prof->inline_thold, skb, NULL)) - real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; - else + *inline_ok = is_inline(priv->prof->inline_thold, skb, + shinfo, pfrag); + + if (*inline_ok) real_size = inline_size(skb); + else + real_size = CTRL_SIZE + + (shinfo->nr_frags + 1) * DS_SIZE; } return real_size; } -static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb, - int real_size, u16 *vlan_tag, int tx_ind, void *fragptr) +static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, + const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + int real_size, u16 *vlan_tag, + int tx_ind, void *fragptr) { struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; + unsigned int hlen = skb_headlen(skb); if (skb->len <= spc) { if (likely(skb->len >= MIN_PKT_LEN)) { @@ -595,19 +632,19 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk memset(((void *)(inl + 1)) + skb->len, 0, MIN_PKT_LEN - skb->len); } - skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); - if (skb_shinfo(skb)->nr_frags) - memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, - skb_frag_size(&skb_shinfo(skb)->frags[0])); + skb_copy_from_linear_data(skb, inl + 1, hlen); + if (shinfo->nr_frags) + memcpy(((void *)(inl + 1)) + hlen, fragptr, + 
skb_frag_size(&shinfo->frags[0])); } else { inl->byte_count = cpu_to_be32(1 << 31 | spc); - if (skb_headlen(skb) <= spc) { - skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); - if (skb_headlen(skb) < spc) { - memcpy(((void *)(inl + 1)) + skb_headlen(skb), - fragptr, spc - skb_headlen(skb)); - fragptr += spc - skb_headlen(skb); + if (hlen <= spc) { + skb_copy_from_linear_data(skb, inl + 1, hlen); + if (hlen < spc) { + memcpy(((void *)(inl + 1)) + hlen, + fragptr, spc - hlen); + fragptr += spc - hlen; } inl = (void *) (inl + 1) + spc; memcpy(((void *)(inl + 1)), fragptr, skb->len - spc); @@ -615,10 +652,11 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk skb_copy_from_linear_data(skb, inl + 1, spc); inl = (void *) (inl + 1) + spc; skb_copy_from_linear_data_offset(skb, spc, inl + 1, - skb_headlen(skb) - spc); - if (skb_shinfo(skb)->nr_frags) - memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc, - fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0])); + hlen - spc); + if (shinfo->nr_frags) + memcpy(((void *)(inl + 1)) + hlen - spc, + fragptr, + skb_frag_size(&shinfo->frags[0])); } wmb(); @@ -642,15 +680,16 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, return fallback(dev, skb) % rings_p_up + up * rings_p_up; } -static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) +static void mlx4_bf_copy(void __iomem *dst, const void *src, + unsigned int bytecnt) { __iowrite64_copy(dst, src, bytecnt / 8); } netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) { + struct skb_shared_info *shinfo = skb_shinfo(skb); struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; struct device *ddev = priv->ddev; struct mlx4_en_tx_ring *ring; struct mlx4_en_tx_desc *tx_desc; @@ -663,15 +702,26 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) u32 index, bf_index; __be32 op_own; u16 vlan_tag = 0; - int i; + int i_frag; int lso_header_size; - void *fragptr; + void *fragptr = NULL; bool bounce = false; + bool send_doorbell; + bool stop_queue; + bool inline_ok; + u32 ring_cons; if (!priv->port_up) goto tx_drop; - real_size = get_real_size(skb, dev, &lso_header_size); + tx_ind = skb_get_queue_mapping(skb); + ring = priv->tx_ring[tx_ind]; + + /* fetch ring->cons far ahead before needing it to avoid stall */ + ring_cons = ACCESS_ONCE(ring->cons); + + real_size = get_real_size(skb, shinfo, dev, &lso_header_size, + &inline_ok, &fragptr); if (unlikely(!real_size)) goto tx_drop; @@ -684,38 +734,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; } - tx_ind = skb->queue_mapping; - ring = priv->tx_ring[tx_ind]; if (vlan_tx_tag_present(skb)) vlan_tag = vlan_tx_tag_get(skb); - /* Check available TXBBs And 2K spare for prefetch */ - if (unlikely(((int)(ring->prod - ring->cons)) > - ring->size - HEADROOM - MAX_DESC_TXBBS)) { - /* every full Tx ring stops queue */ - netif_tx_stop_queue(ring->tx_queue); - ring->queue_stopped++; - /* If queue was emptied after the if, and before the - * stop_queue - need to wake the queue, or else it will remain - * stopped forever. - * Need a memory barrier to make sure ring->cons was not - * updated before queue was stopped. 
- */ - wmb(); - - if (unlikely(((int)(ring->prod - ring->cons)) <= - ring->size - HEADROOM - MAX_DESC_TXBBS)) { - netif_tx_wake_queue(ring->tx_queue); - ring->wake_queue++; - } else { - return NETDEV_TX_BUSY; - } - } + netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); /* Track current inflight packets for performance analysis */ AVG_PERF_COUNTER(priv->pstats.inflight_avg, - (u32) (ring->prod - ring->cons - 1)); + (u32)(ring->prod - ring_cons - 1)); /* Packet is good - grab an index and transmit it */ index = ring->prod & ring->size_mask; @@ -735,46 +762,48 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) tx_info->skb = skb; tx_info->nr_txbb = nr_txbb; + data = &tx_desc->data; if (lso_header_size) data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4, DS_SIZE)); - else - data = &tx_desc->data; /* valid only for none inline segments */ tx_info->data_offset = (void *)data - (void *)tx_desc; + tx_info->inl = inline_ok; + tx_info->linear = (lso_header_size < skb_headlen(skb) && - !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0; + !inline_ok) ? 1 : 0; - data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; + tx_info->nr_maps = shinfo->nr_frags + tx_info->linear; + data += tx_info->nr_maps - 1; - if (is_inline(ring->inline_thold, skb, &fragptr)) { - tx_info->inl = 1; - } else { - /* Map fragments */ - for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { - struct skb_frag_struct *frag; - dma_addr_t dma; + if (!tx_info->inl) { + dma_addr_t dma = 0; + u32 byte_count = 0; + + /* Map fragments if any */ + for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) { + const struct skb_frag_struct *frag; - frag = &skb_shinfo(skb)->frags[i]; + frag = &shinfo->frags[i_frag]; + byte_count = skb_frag_size(frag); dma = skb_frag_dma_map(ddev, frag, - 0, skb_frag_size(frag), + 0, byte_count, DMA_TO_DEVICE); if (dma_mapping_error(ddev, dma)) goto tx_drop_unmap; data->addr = cpu_to_be64(dma); - data->lkey = cpu_to_be32(mdev->mr.key); + data->lkey = ring->mr_key; wmb(); - data->byte_count = cpu_to_be32(skb_frag_size(frag)); + data->byte_count = cpu_to_be32(byte_count); --data; } - /* Map linear part */ + /* Map linear part if needed */ if (tx_info->linear) { - u32 byte_count = skb_headlen(skb) - lso_header_size; - dma_addr_t dma; + byte_count = skb_headlen(skb) - lso_header_size; dma = dma_map_single(ddev, skb->data + lso_header_size, byte_count, @@ -783,29 +812,28 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop_unmap; data->addr = cpu_to_be64(dma); - data->lkey = cpu_to_be32(mdev->mr.key); + data->lkey = ring->mr_key; wmb(); data->byte_count = cpu_to_be32(byte_count); } - tx_info->inl = 0; + /* tx completion can avoid cache line miss for common cases */ + tx_info->map0_dma = dma; + tx_info->map0_byte_count = byte_count; } /* * For timestamping add flag to skb_shinfo and * set flag for further reference */ - if (ring->hwtstamp_tx_type == HWTSTAMP_TX_ON && - skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_info->ts_requested = 0; + if (unlikely(ring->hwtstamp_tx_type == HWTSTAMP_TX_ON && + shinfo->tx_flags & SKBTX_HW_TSTAMP)) { + shinfo->tx_flags |= SKBTX_IN_PROGRESS; tx_info->ts_requested = 1; } /* Prepare ctrl segement apart opcode+ownership, which depends on * whether LSO is used */ - tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * - !!vlan_tx_tag_present(skb); - tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; 
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | @@ -826,6 +854,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) /* Handle LSO (TSO) packets */ if (lso_header_size) { + int i; + /* Mark opcode as LSO */ op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) | ((ring->prod & ring->size) ? @@ -833,15 +863,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) /* Fill in the LSO prefix */ tx_desc->lso.mss_hdr_size = cpu_to_be32( - skb_shinfo(skb)->gso_size << 16 | lso_header_size); + shinfo->gso_size << 16 | lso_header_size); /* Copy headers; * note that we already verified that it is linear */ memcpy(tx_desc->lso.header, skb->data, lso_header_size); - priv->port_stats.tso_packets++; - i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + - !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); + ring->tso_packets++; + + i = ((skb->len - lso_header_size) / shinfo->gso_size) + + !!((skb->len - lso_header_size) % shinfo->gso_size); tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size; ring->packets += i; } else { @@ -851,16 +882,14 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); ring->packets++; - } ring->bytes += tx_info->nr_bytes; netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); - if (tx_info->inl) { - build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); - tx_info->inl = 1; - } + if (tx_info->inl) + build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag, + tx_ind, fragptr); if (skb->encapsulation) { struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb); @@ -873,44 +902,85 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->prod += nr_txbb; /* If we used a bounce buffer then copy descriptor back into place */ - if (bounce) + if (unlikely(bounce)) tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); skb_tx_timestamp(skb); - if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) { - tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn); + /* Check available TXBBs And 2K spare for prefetch */ + stop_queue = (int)(ring->prod - ring_cons) > + ring->size - HEADROOM - MAX_DESC_TXBBS; + if (unlikely(stop_queue)) { + netif_tx_stop_queue(ring->tx_queue); + ring->queue_stopped++; + } + send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue); + + real_size = (real_size / 16) & 0x3f; + + if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && + !vlan_tx_tag_present(skb) && send_doorbell) { + tx_desc->ctrl.bf_qpn = ring->doorbell_qpn | + cpu_to_be32(real_size); op_own |= htonl((bf_index & 0xffff) << 8); - /* Ensure new descirptor hits memory - * before setting ownership of this descriptor to HW */ + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ wmb(); tx_desc->ctrl.owner_opcode = op_own; wmb(); - mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl, - desc_size); + mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl, + desc_size); wmb(); ring->bf.offset ^= ring->bf.buf_size; } else { - /* Ensure new descirptor hits memory - * before setting ownership of this descriptor to HW */ + tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); + 
tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * + !!vlan_tx_tag_present(skb); + tx_desc->ctrl.fence_size = real_size; + + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ wmb(); tx_desc->ctrl.owner_opcode = op_own; - wmb(); - iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); + if (send_doorbell) { + wmb(); + iowrite32(ring->doorbell_qpn, + ring->bf.uar->map + MLX4_SEND_DOORBELL); + } else { + ring->xmit_more++; + } } + if (unlikely(stop_queue)) { + /* If queue was emptied after the if (stop_queue) , and before + * the netif_tx_stop_queue() - need to wake the queue, + * or else it will remain stopped forever. + * Need a memory barrier to make sure ring->cons was not + * updated before queue was stopped. + */ + smp_rmb(); + + ring_cons = ACCESS_ONCE(ring->cons); + if (unlikely(((int)(ring->prod - ring_cons)) <= + ring->size - HEADROOM - MAX_DESC_TXBBS)) { + netif_tx_wake_queue(ring->tx_queue); + ring->wake_queue++; + } + } return NETDEV_TX_OK; tx_drop_unmap: en_err(priv, "DMA mapping error\n"); - for (i++; i < skb_shinfo(skb)->nr_frags; i++) { - data++; + while (++i_frag < shinfo->nr_frags) { + ++data; dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr), be32_to_cpu(data->byte_count), PCI_DMA_TODEVICE); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 2a004b347e1..a49c9d11d8a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -101,21 +101,24 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not) mb(); } -static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor) +static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor, + u8 eqe_size) { /* (entry & (eq->nent - 1)) gives us a cyclic array */ - unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor); - /* CX3 is capable of extending the EQE from 32 to 64 bytes. - * When this feature is enabled, the first (in the lower addresses) + unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; + /* CX3 is capable of extending the EQE from 32 to 64 bytes with + * strides of 64B,128B and 256B. + * When 64B EQE is used, the first (in the lower addresses) * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes * contain the legacy EQE information. + * In all other cases, the first 32B contains the legacy EQE info. */ return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; } -static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor) +static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size) { - struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor); + struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; } @@ -459,8 +462,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) enum slave_port_gen_event gen_event; unsigned long flags; struct mlx4_vport_state *s_info; + int eqe_size = dev->caps.eqe_size; - while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) { + while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) { /* * Make sure we read EQ entry contents after we've * checked the ownership bit. 
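
The next_eqe_sw() change above keeps the classic ownership-bit convention: an EQE belongs to software only when the hardware phase (bit 7 of the owner byte, toggled on each queue wrap) matches the software phase derived from the consumer index, and the entry contents may be read only after that check, behind a barrier. A hedged, self-contained sketch of the idiom — the toy_eqe layout is illustrative, not the real mlx4_eqe:

#include <linux/types.h>
#include <asm/barrier.h>

/* Toy EQE with just the ownership byte; the real mlx4_eqe has more fields. */
struct toy_eqe {
	u8 data[31];
	u8 owner;	/* bit 7 toggles each time hardware wraps the EQ */
};

/* Mirrors the next_eqe_sw() test: software owns the entry when the
 * hardware phase equals the software phase (nent is a power of two).
 */
static bool toy_eqe_sw_owned(const struct toy_eqe *eqe, u32 cons_index, u32 nent)
{
	if (!!(eqe->owner & 0x80) ^ !!(cons_index & nent))
		return false;	/* still owned by hardware */
	rmb();			/* read the payload only after the check */
	return true;
}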
@@ -894,8 +898,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent, eq->dev = dev; eq->nent = roundup_pow_of_two(max(nent, 2)); - /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */ - npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE; + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B. + */ + npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; eq->page_list = kmalloc(npages * sizeof *eq->page_list, GFP_KERNEL); @@ -997,8 +1003,10 @@ static void mlx4_free_eq(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox; int err; int i; - /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */ - int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE; + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B + */ + int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 494753e44ae..2e88a235e26 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -137,7 +137,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [8] = "Dynamic QP updates support", [9] = "Device managed flow steering IPoIB support", [10] = "TCP/IP offloads/flow-steering for VXLAN support", - [11] = "MAD DEMUX (Secure-Host) support" + [11] = "MAD DEMUX (Secure-Host) support", + [12] = "Large cache line (>64B) CQE stride support", + [13] = "Large cache line (>64B) EQE stride support" }; int i; @@ -557,6 +559,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 +#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 @@ -733,6 +736,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->max_rq_sg = field; MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); dev_cap->max_rq_desc_sz = size; + MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + if (field & (1 << 6)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + if (field & (1 << 7)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; MLX4_GET(dev_cap->bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); @@ -974,8 +982,13 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, if (port < 0) return -EINVAL; - vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) | - (port & 0xFF); + /* Protect against untrusted guests: enforce that this is the + * QUERY_PORT general query. 
+ */ + if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF) + return -EINVAL; + + vhcr->in_modifier = port; err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, @@ -1376,6 +1389,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) +#define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) @@ -1452,11 +1466,25 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); dev->caps.cqe_size = 64; - dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } else { dev->caps.cqe_size = 32; } + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ + if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { + dev->caps.eqe_size = cache_line_size(); + dev->caps.cqe_size = cache_line_size(); + dev->caps.eqe_factor = 0; + MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | + (ilog2(dev->caps.eqe_size) - 5)), + INIT_HCA_EQE_CQE_STRIDE_OFFSET); + + /* User still need to know to support CQE > 32B */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + /* QPC/EEC/CQC/EQC/RDMARC attributes */ MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); @@ -1616,6 +1644,17 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, if (byte_field & 0x40) /* 64-bytes cqe enabled */ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ + MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); + if (byte_field) { + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; + param->cqe_size = 1 << ((byte_field & + MLX4_CQE_SIZE_MASK_STRIDE) + 5); + param->eqe_size = 1 << (((byte_field & + MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); + } + /* TPT attributes */ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 1fce03ebe5c..9b835aecac9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -178,6 +178,8 @@ struct mlx4_init_hca_param { u8 uar_page_sz; /* log pg sz in 4k chunks */ u8 steering_mode; /* for QUERY_HCA */ u64 dev_cap_enabled; + u16 cqe_size; /* For use only when CQE stride feature enabled */ + u16 eqe_size; /* For use only when EQE stride feature enabled */ }; struct mlx4_init_ib_param { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7e2d5d57c59..90de6e1ad06 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); #endif /* CONFIG_PCI_MSI */ static uint8_t num_vfs[3] = {0, 0, 0}; -static int num_vfs_argc = 3; +static int num_vfs_argc; module_param_array(num_vfs, byte , &num_vfs_argc, 0444); MODULE_PARM_DESC(num_vfs, 
"enable #num_vfs functions if num_vfs > 0\n" "num_vfs=port1,port2,port1+2"); static uint8_t probe_vf[3] = {0, 0, 0}; -static int probe_vfs_argc = 3; +static int probe_vfs_argc; module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" "probe_vf=port1,port2,port1+2"); @@ -104,7 +104,8 @@ module_param(enable_64b_cqe_eqe, bool, 0444); MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); -#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE +#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ + MLX4_FUNC_CAP_EQE_CQE_STRIDE) static char mlx4_version[] = DRV_NAME ": Mellanox ConnectX core driver v" @@ -196,6 +197,40 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev) dev->caps.port_mask[i] = dev->caps.port_type[i]; } +static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) +{ + struct mlx4_caps *dev_cap = &dev->caps; + + /* FW not supporting or cancelled by user */ + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || + !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) + return; + + /* Must have 64B CQE_EQE enabled by FW to use bigger stride + * When FW has NCSI it may decide not to report 64B CQE/EQEs + */ + if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || + !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + return; + } + + if (cache_line_size() == 128 || cache_line_size() == 256) { + mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); + /* Changing the real data inside CQE size to 32B */ + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; + + if (mlx4_is_master(dev)) + dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; + } else { + mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } +} + static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int err; @@ -390,6 +425,14 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; } + + if (dev_cap->flags2 & + (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | + MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { + mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } } if ((dev->caps.flags & @@ -397,6 +440,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) mlx4_is_master(dev)) dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; + if (!mlx4_is_slave(dev)) + mlx4_enable_cqe_eqe_stride(dev); + return 0; } @@ -724,11 +770,22 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { dev->caps.cqe_size = 64; - dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } else { dev->caps.cqe_size = 32; } + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { + dev->caps.eqe_size = hca_param.eqe_size; + dev->caps.eqe_factor = 0; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { + dev->caps.cqe_size = hca_param.cqe_size; + /* User still need to know when CQE > 32B */ + dev->caps.userspace_caps |= 
MLX4_USER_DEV_CAP_LARGE_CQE; + } + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); @@ -2202,115 +2259,18 @@ static void mlx4_free_ownership(struct mlx4_dev *dev) iounmap(owner); } -static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) +static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, + int total_vfs, int *nvfs, struct mlx4_priv *priv) { - struct mlx4_priv *priv; struct mlx4_dev *dev; + unsigned sum = 0; int err; int port; - int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; - int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; - const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { - {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; - unsigned total_vfs = 0; - int sriov_initialized = 0; - unsigned int i; - - pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); - - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); - return err; - } - - /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS - * per port, we must limit the number of VFs to 63 (since their are - * 128 MACs) - */ - for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; - total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { - nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; - if (nvfs[i] < 0) { - dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); - return -EINVAL; - } - } - for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; - i++) { - prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; - if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { - dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); - return -EINVAL; - } - } - if (total_vfs >= MLX4_MAX_NUM_VF) { - dev_err(&pdev->dev, - "Requested more VF's (%d) than allowed (%d)\n", - total_vfs, MLX4_MAX_NUM_VF - 1); - return -EINVAL; - } - - for (i = 0; i < MLX4_MAX_PORTS; i++) { - if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { - dev_err(&pdev->dev, - "Requested more VF's (%d) for port (%d) than allowed (%d)\n", - nvfs[i] + nvfs[2], i + 1, - MLX4_MAX_NUM_VF_P_PORT - 1); - return -EINVAL; - } - } - - - /* - * Check for BARs. 
- */ - if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && - !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", - pci_dev_data, pci_resource_flags(pdev, 0)); - err = -ENODEV; - goto err_disable_pdev; - } - if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing UAR, aborting\n"); - err = -ENODEV; - goto err_disable_pdev; - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) { - dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); - goto err_disable_pdev; - } - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); - goto err_release_regions; - } - } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); - goto err_release_regions; - } - } + int i; + int existing_vfs = 0; - /* Allow large DMA segments, up to the firmware limit of 1 GB */ - dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); + dev = &priv->dev; - dev = pci_get_drvdata(pdev); - priv = mlx4_priv(dev); - dev->pdev = pdev; INIT_LIST_HEAD(&priv->ctx_list); spin_lock_init(&priv->ctx_lock); @@ -2324,28 +2284,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) dev->rev_id = pdev->revision; dev->numa_node = dev_to_node(&pdev->dev); + /* Detect if this device is a virtual function */ if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { - /* When acting as pf, we normally skip vfs unless explicitly - * requested to probe them. 
 	 */
-		if (total_vfs) {
-			unsigned vfs_offset = 0;
-			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
-			     vfs_offset + nvfs[i] < extended_func_num(pdev);
-			     vfs_offset += nvfs[i], i++)
-				;
-			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
-				err = -ENODEV;
-				goto err_free_dev;
-			}
-			if ((extended_func_num(pdev) - vfs_offset)
-			    > prb_vf[i]) {
-				mlx4_warn(dev, "Skipping virtual function:%d\n",
-					  extended_func_num(pdev));
-				err = -ENODEV;
-				goto err_free_dev;
-			}
-		}
 		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
 		dev->flags |= MLX4_FLAG_SLAVE;
 	} else {
@@ -2355,11 +2296,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 		err = mlx4_get_ownership(dev);
 		if (err) {
 			if (err < 0)
-				goto err_free_dev;
+				return err;
 			else {
 				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
-				err = -EINVAL;
-				goto err_free_dev;
+				return -EINVAL;
 			}
 		}
 
@@ -2371,21 +2311,28 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 				       GFP_KERNEL);
 		if (NULL == dev->dev_vfs) {
 			mlx4_err(dev, "Failed to allocate memory for VFs\n");
-			err = 0;
+			err = -ENOMEM;
+			goto err_free_own;
 		} else {
 			atomic_inc(&pf_loading);
-			err = pci_enable_sriov(pdev, total_vfs);
+			existing_vfs = pci_num_vf(pdev);
+			if (existing_vfs) {
+				err = 0;
+				if (existing_vfs != total_vfs)
+					mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
+						 existing_vfs, total_vfs);
+			} else {
+				err = pci_enable_sriov(pdev, total_vfs);
+			}
 			if (err) {
 				mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
 					 err);
 				atomic_dec(&pf_loading);
-				err = 0;
 			} else {
 				mlx4_warn(dev, "Running in master mode\n");
 				dev->flags |= MLX4_FLAG_SRIOV |
 					      MLX4_FLAG_MASTER;
 				dev->num_vfs = total_vfs;
-				sriov_initialized = 1;
 			}
 		}
 	}
@@ -2401,7 +2348,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 		err = mlx4_reset(dev);
 		if (err) {
 			mlx4_err(dev, "Failed to reset HCA, aborting\n");
-			goto err_rel_own;
+			goto err_sriov;
 		}
 	}
 
@@ -2451,34 +2398,46 @@ slave_start:
 	/* In master functions, the communication channel must be initialized
 	 * after obtaining its address from fw */
 	if (mlx4_is_master(dev)) {
-		unsigned sum = 0;
-		err = mlx4_multi_func_init(dev);
-		if (err) {
-			mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
+		int ib_ports = 0;
+
+		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+			ib_ports++;
+
+		if (ib_ports &&
+		    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
+			mlx4_err(dev,
+				 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
+			err = -EINVAL;
+			goto err_close;
+		}
+		if (dev->caps.num_ports < 2 &&
+		    num_vfs_argc > 1) {
+			err = -EINVAL;
+			mlx4_err(dev,
+				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
+				 dev->caps.num_ports);
 			goto err_close;
 		}
-		if (sriov_initialized) {
-			int ib_ports = 0;
-			mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-				ib_ports++;
+		memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));
 
-			if (ib_ports &&
-			    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
-				mlx4_err(dev,
-					 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
-				err = -EINVAL;
-				goto err_master_mfunc;
-			}
-			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
-				unsigned j;
-				for (j = 0; j < nvfs[i]; ++sum, ++j) {
-					dev->dev_vfs[sum].min_port =
-						i < 2 ? i + 1 : 1;
-					dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
-						dev->caps.num_ports;
-				}
+		for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) {
+			unsigned j;
+
+			for (j = 0; j < dev->nvfs[i]; ++sum, ++j) {
+				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
+				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
+					dev->caps.num_ports;
 			}
 		}
+
+		/* In master functions, the communication channel
+		 * must be initialized after obtaining its address from fw
+		 */
+		err = mlx4_multi_func_init(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
+			goto err_close;
+		}
 	}
 
 	err = mlx4_alloc_eq_table(dev);
@@ -2499,7 +2458,7 @@ slave_start:
 	if (!mlx4_is_slave(dev)) {
 		err = mlx4_init_steering(dev);
 		if (err)
-			goto err_free_eq;
+			goto err_disable_msix;
 	}
 
 	err = mlx4_setup_hca(dev);
@@ -2559,6 +2518,10 @@ err_steer:
 	if (!mlx4_is_slave(dev))
 		mlx4_clear_steering(dev);
 
+err_disable_msix:
+	if (dev->flags & MLX4_FLAG_MSI_X)
+		pci_disable_msix(pdev);
+
 err_free_eq:
 	mlx4_free_eq_table(dev);
 
@@ -2575,9 +2538,6 @@ err_master_mfunc:
 	}
 
 err_close:
-	if (dev->flags & MLX4_FLAG_MSI_X)
-		pci_disable_msix(pdev);
-
 	mlx4_close_hca(dev);
 
 err_mfunc:
@@ -2588,20 +2548,154 @@ err_cmd:
 	mlx4_cmd_cleanup(dev);
 
 err_sriov:
-	if (dev->flags & MLX4_FLAG_SRIOV)
+	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
 		pci_disable_sriov(pdev);
 
-err_rel_own:
-	if (!mlx4_is_slave(dev))
-		mlx4_free_ownership(dev);
-
 	if (mlx4_is_master(dev) && dev->num_vfs)
 		atomic_dec(&pf_loading);
 
 	kfree(priv->dev.dev_vfs);
 
-err_free_dev:
-	kfree(priv);
+err_free_own:
+	if (!mlx4_is_slave(dev))
+		mlx4_free_ownership(dev);
+
+	return err;
+}
+
+static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
+			   struct mlx4_priv *priv)
+{
+	int err;
+	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
+		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
+	unsigned total_vfs = 0;
+	unsigned int i;
+
+	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		return err;
+	}
+
+	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACs
+	 * per port, we must limit the number of VFs to 63 (since there are
+	 * 128 MACs)
+	 */
+	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
+	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
+		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
+		if (nvfs[i] < 0) {
+			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
+			err = -EINVAL;
+			goto err_disable_pdev;
+		}
+	}
+	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
+	     i++) {
+		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
+		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
+			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
+			err = -EINVAL;
+			goto err_disable_pdev;
+		}
+	}
+	if (total_vfs >= MLX4_MAX_NUM_VF) {
+		dev_err(&pdev->dev,
+			"Requested more VFs (%d) than allowed (%d)\n",
+			total_vfs, MLX4_MAX_NUM_VF - 1);
+		err = -EINVAL;
+		goto err_disable_pdev;
+	}
+
+	for (i = 0; i < MLX4_MAX_PORTS; i++) {
+		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+			dev_err(&pdev->dev,
+				"Requested more VFs (%d) for port (%d) than allowed (%d)\n",
+				nvfs[i] + nvfs[2], i + 1,
+				MLX4_MAX_NUM_VF_P_PORT - 1);
+			err = -EINVAL;
+			goto err_disable_pdev;
+		}
+	}
+
+	/* Check for BARs. */
+	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
+	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+			pci_dev_data, pci_resource_flags(pdev, 0));
+		err = -ENODEV;
+		goto err_disable_pdev;
+	}
+	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev, "Missing UAR, aborting\n");
+		err = -ENODEV;
+		goto err_disable_pdev;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
+		goto err_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
+			goto err_release_regions;
+		}
+	}
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
+			goto err_release_regions;
+		}
+	}
+
+	/* Allow large DMA segments, up to the firmware limit of 1 GB */
+	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+
+	/* Detect if this device is a virtual function */
+	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
+		/* When acting as PF, we normally skip VFs unless explicitly
+		 * requested to probe them.
+		 */
+		if (total_vfs) {
+			unsigned vfs_offset = 0;
+
+			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
+			     vfs_offset + nvfs[i] < extended_func_num(pdev);
+			     vfs_offset += nvfs[i], i++)
+				;
+			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
+				err = -ENODEV;
+				goto err_release_regions;
+			}
+			if ((extended_func_num(pdev) - vfs_offset)
+			    > prb_vf[i]) {
+				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
+					 extended_func_num(pdev));
+				err = -ENODEV;
+				goto err_release_regions;
+			}
+		}
+	}
+
+	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
+	if (err)
+		goto err_release_regions;
+	return 0;
 
 err_release_regions:
 	pci_release_regions(pdev);
@@ -2616,6 +2710,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct mlx4_priv *priv;
 	struct mlx4_dev *dev;
+	int ret;
 
 	printk_once(KERN_INFO "%s", mlx4_version);
 
@@ -2624,28 +2719,38 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		return -ENOMEM;
 
 	dev       = &priv->dev;
+	dev->pdev = pdev;
 	pci_set_drvdata(pdev, dev);
 	priv->pci_dev_data = id->driver_data;
 
-	return __mlx4_init_one(pdev, id->driver_data);
+	ret = __mlx4_init_one(pdev, id->driver_data, priv);
+	if (ret)
+		kfree(priv);
+
+	return ret;
 }
 
-static void __mlx4_remove_one(struct pci_dev *pdev)
+static void mlx4_unload_one(struct pci_dev *pdev)
 {
 	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int               pci_dev_data;
 	int p;
+	int active_vfs = 0;
 
 	if (priv->removed)
 		return;
 
 	pci_dev_data = priv->pci_dev_data;
 
-	/* in SRIOV it is not allowed to unload the pf's
-	 * driver while there are alive vf's */
-	if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
-		pr_warn("Removing PF when there are assigned VF's !!!\n");
+	/* Disabling SR-IOV is not allowed while there are active VFs */
+	if (mlx4_is_master(dev)) {
+		active_vfs = mlx4_how_many_lives_vf(dev);
+		if (active_vfs) {
+			pr_warn("Removing PF when there are active VFs!\n");
+			pr_warn("Will not disable SR-IOV.\n");
+		}
+	}
 	mlx4_stop_sense(dev);
 	mlx4_unregister_device(dev);
 
@@ -2688,7 +2793,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
 	if (dev->flags & MLX4_FLAG_MSI_X)
 		pci_disable_msix(pdev);
 
-	if (dev->flags & MLX4_FLAG_SRIOV) {
+	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
 		mlx4_warn(dev, "Disabling SR-IOV\n");
 		pci_disable_sriov(pdev);
 		dev->num_vfs = 0;
@@ -2704,8 +2809,6 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
 	kfree(dev->caps.qp1_proxy);
 	kfree(dev->dev_vfs);
 
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
 	memset(priv, 0, sizeof(*priv));
 	priv->pci_dev_data = pci_dev_data;
 	priv->removed = 1;
@@ -2716,7 +2819,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
-	__mlx4_remove_one(pdev);
+	mlx4_unload_one(pdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
 	kfree(priv);
 	pci_set_drvdata(pdev, NULL);
 }
@@ -2725,11 +2830,22 @@ int mlx4_restart_one(struct pci_dev *pdev)
 {
 	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	int		  pci_dev_data;
+	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	int pci_dev_data, err, total_vfs;
 
 	pci_dev_data = priv->pci_dev_data;
-	__mlx4_remove_one(pdev);
-	return __mlx4_init_one(pdev, pci_dev_data);
+	total_vfs = dev->num_vfs;
+	memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs));
+
+	mlx4_unload_one(pdev);
+	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
+	if (err) {
+		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
+			 __func__, pci_name(pdev), err);
+		return err;
+	}
+
+	return err;
 }
 
 static const struct pci_device_id mlx4_pci_table[] = {
@@ -2783,7 +2899,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 					      pci_channel_state_t state)
 {
-	__mlx4_remove_one(pdev);
+	mlx4_unload_one(pdev);
 
 	return state == pci_channel_io_perm_failure ?
 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -2795,7 +2911,7 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int               ret;
 
-	ret = __mlx4_init_one(pdev, priv->pci_dev_data);
+	ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv);
 
 	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
@@ -2809,7 +2925,7 @@ static struct pci_driver mlx4_driver = {
 	.name		= DRV_NAME,
 	.id_table	= mlx4_pci_table,
 	.probe		= mlx4_init_one,
-	.shutdown	= __mlx4_remove_one,
+	.shutdown	= mlx4_unload_one,
 	.remove		= mlx4_remove_one,
 	.err_handler    = &mlx4_err_handler,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index b508c7887ef..de10dbb2e6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -285,6 +285,9 @@ struct mlx4_icm_table {
 #define MLX4_MPT_STATUS_SW 0xF0
 #define MLX4_MPT_STATUS_HW 0x00
 
+#define MLX4_CQE_SIZE_MASK_STRIDE	0x3
+#define MLX4_EQE_SIZE_MASK_STRIDE	0x30
+
 /*
  * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
  */
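[The param_map table in the new __mlx4_init_one() above routes each comma-separated num_vfs/probe_vf module-parameter value into one of three nvfs[] slots: VFs on port 1 only, VFs on port 2 only, and dual-port VFs. Below is a minimal user-space sketch of that mapping, assuming a two-port HCA; the num_vfs=5,3,2 input is illustrative only and not part of the patch.]

    #include <stdio.h>

    #define MLX4_MAX_PORTS 2	/* assumption: two-port HCA */

    /* Row = (number of arguments - 1), column = argument position.
     * A single argument lands in slot 2 (VFs spanning both ports);
     * two or three arguments fill the per-port slots first.
     */
    static const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
    	{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };

    int main(void)
    {
    	int num_vfs[MLX4_MAX_PORTS + 1] = {5, 3, 2};	/* "num_vfs=5,3,2" */
    	int num_vfs_argc = 3;
    	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
    	unsigned total_vfs = 0;
    	int i;

    	for (i = 0; i < MLX4_MAX_PORTS + 1 && i < num_vfs_argc;
    	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++)
    		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];

    	/* Prints: port1=5 port2=3 dual=2 total=10 */
    	printf("port1=%d port2=%d dual=%d total=%u\n",
    	       nvfs[0], nvfs[1], nvfs[2], total_vfs);
    	return 0;
    }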
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3de41be4942..8fef65840b3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -216,13 +216,16 @@ enum cq_type {
 
 struct mlx4_en_tx_info {
 	struct sk_buff *skb;
-	u32 nr_txbb;
-	u32 nr_bytes;
-	u8 linear;
-	u8 data_offset;
-	u8 inl;
-	u8 ts_requested;
-};
+	dma_addr_t	map0_dma;
+	u32		map0_byte_count;
+	u32		nr_txbb;
+	u32		nr_bytes;
+	u8		linear;
+	u8		data_offset;
+	u8		inl;
+	u8		ts_requested;
+	u8		nr_maps;
+} ____cacheline_aligned_in_smp;
 
 
 #define MLX4_EN_BIT_DESC_OWN	0x80000000
@@ -253,39 +256,46 @@ struct mlx4_en_rx_alloc {
 };
 
 struct mlx4_en_tx_ring {
+	/* cache line used and dirtied in tx completion
+	 * (mlx4_en_free_tx_buf())
+	 */
+	u32			last_nr_txbb;
+	u32			cons;
+	unsigned long		wake_queue;
+
+	/* cache line used and dirtied in mlx4_en_xmit() */
+	u32			prod ____cacheline_aligned_in_smp;
+	unsigned long		bytes;
+	unsigned long		packets;
+	unsigned long		tx_csum;
+	unsigned long		tso_packets;
+	unsigned long		xmit_more;
+	struct mlx4_bf		bf;
+	unsigned long		queue_stopped;
+
+	/* Following part should be mostly read */
+	cpumask_t		affinity_mask;
+	struct mlx4_qp		qp;
 	struct mlx4_hwq_resources wqres;
-	u32 size ; /* number of TXBBs */
-	u32 size_mask;
-	u16 stride;
-	u16 cqn;	/* index of port CQ associated with this ring */
-	u32 prod;
-	u32 cons;
-	u32 buf_size;
-	u32 doorbell_qpn;
-	void *buf;
-	u16 poll_cnt;
-	struct mlx4_en_tx_info *tx_info;
-	u8 *bounce_buf;
-	u8 queue_index;
-	cpumask_t affinity_mask;
-	u32 last_nr_txbb;
-	struct mlx4_qp qp;
-	struct mlx4_qp_context context;
-	int qpn;
-	enum mlx4_qp_state qp_state;
-	struct mlx4_srq dummy;
-	unsigned long bytes;
-	unsigned long packets;
-	unsigned long tx_csum;
-	unsigned long queue_stopped;
-	unsigned long wake_queue;
-	struct mlx4_bf bf;
-	bool bf_enabled;
-	bool bf_alloced;
-	struct netdev_queue *tx_queue;
-	int hwtstamp_tx_type;
-	int inline_thold;
-};
+	u32			size; /* number of TXBBs */
+	u32			size_mask;
+	u16			stride;
+	u16			cqn;	/* index of port CQ associated with this ring */
+	u32			buf_size;
+	__be32			doorbell_qpn;
+	__be32			mr_key;
+	void			*buf;
+	struct mlx4_en_tx_info	*tx_info;
+	u8			*bounce_buf;
+	struct mlx4_qp_context	context;
+	int			qpn;
+	enum mlx4_qp_state	qp_state;
+	u8			queue_index;
+	bool			bf_enabled;
+	bool			bf_alloced;
+	struct netdev_queue	*tx_queue;
+	int			hwtstamp_tx_type;
+} ____cacheline_aligned_in_smp;
 
 struct mlx4_en_rx_desc {
 	/* actual number of entries depends on rx ring stride */
@@ -426,6 +436,7 @@ struct mlx4_en_pkt_stats {
 
 struct mlx4_en_port_stats {
 	unsigned long tso_packets;
+	unsigned long xmit_more;
 	unsigned long queue_stopped;
 	unsigned long wake_queue;
 	unsigned long tx_timeout;
@@ -433,7 +444,7 @@ struct mlx4_en_port_stats {
 	unsigned long rx_chksum_good;
 	unsigned long rx_chksum_none;
 	unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS		8
+#define NUM_PORT_STATS		9
 };
 
 struct mlx4_en_perf_stats {
@@ -542,6 +553,7 @@ struct mlx4_en_priv {
 	unsigned max_mtu;
 	int base_qpn;
 	int cqe_factor;
+	int cqe_size;
 
 	struct mlx4_en_rss_map rss_map;
 	__be32 ctrl_flags;
@@ -612,6 +624,11 @@ struct mlx4_mac_entry {
 	struct rcu_head rcu;
 };
 
+static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz)
+{
+	return buf + idx * cqe_sz;
+}
+
 #ifdef CONFIG_NET_RX_BUSY_POLL
 static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
 {
@@ -836,8 +853,8 @@ extern const struct ethtool_ops mlx4_en_ethtool_ops;
  */
 
 __printf(3, 4)
-int en_print(const char *level, const struct mlx4_en_priv *priv,
-	     const char *format, ...);
+void en_print(const char *level, const struct mlx4_en_priv *priv,
+	      const char *format, ...);
 
 #define en_dbg(mlevel, priv, format, ...)				\
 do {								\
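[The mlx4_en_get_cqe() helper added above replaces fixed struct-array indexing so that 32-byte and 64-byte CQEs (see the new priv->cqe_size field and MLX4_CQE_SIZE_MASK_STRIDE) can share one completion path. Below is a user-space sketch of the same stride arithmetic; struct mlx4_cqe is stubbed out here, the real hardware layout lives in include/linux/mlx4/cq.h.]

    #include <stdio.h>
    #include <stdlib.h>

    /* Stub with only what this demo touches; the real struct mlx4_cqe
     * is hardware-defined and larger. */
    struct mlx4_cqe {
    	unsigned char owner_sr_opcode;
    };

    /* Same arithmetic as the driver helper: the CQ buffer is a flat
     * byte array and idx advances by the runtime CQE stride. */
    static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz)
    {
    	return (struct mlx4_cqe *)((char *)buf + idx * cqe_sz);
    }

    int main(void)
    {
    	int cqe_size = 64;	/* 32 on older HCAs; known only at runtime */
    	int cq_entries = 4;
    	void *buf = calloc(cq_entries, cqe_size);
    	int i;

    	if (!buf)
    		return 1;
    	for (i = 0; i < cq_entries; i++) {
    		struct mlx4_cqe *cqe = mlx4_en_get_cqe(buf, i, cqe_size);
    		/* Offsets print as 0, 64, 128, 192 */
    		printf("cqe %d at offset %td\n", i, (char *)cqe - (char *)buf);
    	}
    	free(buf);
    	return 0;
    }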
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 7d717eccb7b..193a6adb5d0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
+/* Must protect against concurrent access */
 int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 		       struct mlx4_mpt_entry ***mpt_entry)
 {
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
 	struct mlx4_cmd_mailbox *mailbox = NULL;
 
-	/* Make sure that at this point we have single-threaded access only */
-
 	if (mmr->enabled != MLX4_MPT_EN_HW)
 		return -EINVAL;
 
 	err = mlx4_HW2SW_MPT(dev, NULL, key);
-
 	if (err) {
 		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
 		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 			   0, MLX4_CMD_QUERY_MPT,
 			   MLX4_CMD_TIME_CLASS_B,
 			   MLX4_CMD_WRAPPED);
-
 	if (err)
 		goto free_mailbox;
 
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 		err = mlx4_SW2HW_MPT(dev, mailbox, key);
 	}
 
-	mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
-	if (!err)
+	if (!err) {
+		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
 		mmr->enabled = MLX4_MPT_EN_HW;
+	}
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
 int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
 			 u32 pdn)
 {
-	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
+	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
 	/* The wrapper function will put the slave's id here */
 	if (mlx4_is_mfunc(dev))
 		pd_flags &= ~MLX4_MPT_PD_VF_MASK;
-	mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) |
+
+	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
 					  (pdn & MLX4_MPT_PD_MASK) |
 					  MLX4_MPT_PD_FLAG_EN_INV);
 	return 0;
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
 	int err;
 
-	mpt_entry->start       = cpu_to_be64(mr->iova);
-	mpt_entry->length      = cpu_to_be64(mr->size);
-	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+	mpt_entry->start       = cpu_to_be64(iova);
+	mpt_entry->length      = cpu_to_be64(size);
+	mpt_entry->entity_size = cpu_to_be32(page_shift);
 
 	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
 	if (err)
 		return err;
 
+	mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
+					   MLX4_MPT_PD_FLAG_EN_INV);
+	mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
+					   MLX4_MPT_FLAG_SW_OWNS);
 	if (mr->mtt.order < 0) {
 		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
 		mpt_entry->mtt_addr = 0;
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 		if (mr->mtt.page_shift == 0)
 			mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
 	}
+	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+		/* fast register MR in free state */
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+						   MLX4_MPT_PD_FLAG_RAE);
+	} else {
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+	}
 	mr->enabled = MLX4_MPT_EN_SW;
 
 	return 0;
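[In the mlx4_mr_hw_change_pd() hunk above, the PD number occupies the low bits of pd_flags and is swapped by masking it out and ORing the new value in; the patch only hoists the masking to the initial read, the resulting bit pattern is unchanged. Below is a sketch of that field-replace idiom; the DEMO_* constants are assumptions for illustration, the real masks are private to mr.c.]

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Assumed layout for the demo only: low bits carry the PD number,
     * higher bits carry flags. */
    #define DEMO_MPT_PD_MASK	0x1FFFFu
    #define DEMO_MPT_PD_FLAG_EN_INV	0x3000000u

    /* Replace the PD field inside pd_flags, preserving all other bits. */
    static uint32_t demo_change_pd(uint32_t pd_flags, uint32_t pdn)
    {
    	pd_flags &= ~DEMO_MPT_PD_MASK;			/* drop old PD */
    	return pd_flags | (pdn & DEMO_MPT_PD_MASK)	/* insert new PD */
    		| DEMO_MPT_PD_FLAG_EN_INV;		/* keep EN_INV set */
    }

    int main(void)
    {
    	uint32_t before = 0x00000042;	/* old PD = 0x42, no flags yet */
    	uint32_t after = demo_change_pd(before, 0x17);

    	/* Prints: before=0x00000042 after=0x03000017 */
    	printf("before=0x%08" PRIx32 " after=0x%08" PRIx32 "\n",
    	       before, after);
    	return 0;
    }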
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 9ba0c1ca10d..94eeb2c7d7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev,
 	int i;
 
 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-		if ((mac & MLX4_MAC_MASK) ==
+		if (table->refs[i] &&
+		    (MLX4_MAC_MASK & mac) ==
 		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
 			return i;
 	}
@@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 
 	mutex_lock(&table->mutex);
 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-		if (free < 0 && !table->entries[i]) {
-			free = i;
+		if (!table->refs[i]) {
+			if (free < 0)
+				free = i;
 			continue;
 		}
 
-		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+		if ((MLX4_MAC_MASK & mac) ==
+		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
 			/* MAC already registered, increment ref count */
 			err = i;
 			++table->refs[i];
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 0dc31d85fc3..2301365c79c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -390,13 +390,14 @@ err_icm:
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
 		   enum mlx4_update_qp_attr attr,
 		   struct mlx4_update_qp_params *params)
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_update_qp_context *cmd;
 	u64 pri_addr_path_mask = 0;
+	u64 qp_mask = 0;
 	int err = 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
 		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
 	}
 
+	if (attr & MLX4_UPDATE_QP_VSD) {
+		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
+
+		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
+			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
+	}
+
 	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+	cmd->qp_mask = cpu_to_be64(qp_mask);
 
-	err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
 		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
 		       MLX4_CMD_NATIVE);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1089367fed2..5d2498dcf53 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
 	struct mlx4_qp_context	*qpc = inbox->buf + 8;
 	struct mlx4_vport_oper_state *vp_oper;
 	struct mlx4_priv *priv;
+	u32 qp_type;
 	int port;
 
 	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
 	priv = mlx4_priv(dev);
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 
 	if (MLX4_VGT != vp_oper->state.default_vlan) {
 		/* the reserved QPs (special, proxy, tunnel)
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
 		if (mlx4_is_qp_reserved(dev, qpn))
 			return 0;
 
-		/* force strip vlan by clear vsd */
-		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
+		if (qp_type == MLX4_QP_ST_UD ||
+		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+				*(__be32 *)inbox->buf =
+					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+					MLX4_QP_OPTPAR_VLAN_STRIPPING);
+				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+			} else {
+				struct mlx4_update_qp_params params = {.flags = 0};
+
+				mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+			}
+		}
 
 		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
 		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
 	}
 
 	port = (rqp->sched_queue >> 6 & 1) + 1;
-	smac_index = cmd->qp_context.pri_path.grh_mylmc;
-	err = mac_find_smac_ix_in_slave(dev, slave, port,
-					smac_index, &mac);
-	if (err) {
-		mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
-			 qpn, smac_index);
-		goto err_mac;
+
+	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+		smac_index = cmd->qp_context.pri_path.grh_mylmc;
+		err = mac_find_smac_ix_in_slave(dev, slave, port,
+						smac_index, &mac);
+
+		if (err) {
+			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+				 qpn, smac_index);
+			goto err_mac;
+		}
 	}
 
 	err = mlx4_cmd(dev, inbox->dma,
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 
 	upd_context = mailbox->buf;
-	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
+	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
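[The last resource_tracker.c hunk, together with the qp.c change above, fixes a bit-position-versus-bitmask mix-up: MLX4_UPD_QP_MASK_VSD names a bit index within the 64-bit qp_mask, so it must be shifted with 1ULL << before being written. Below is a standalone sketch of the difference; the index value 33 is an assumption for the demo.]

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Bit *position* of the VSD field inside the 64-bit qp_mask; the
     * real enum value lives in the mlx4 headers. */
    #define DEMO_UPD_QP_MASK_VSD 33

    int main(void)
    {
    	/* Buggy form: stores the position itself; 33 == 0b100001,
    	 * which wrongly sets bits 0 and 5. */
    	uint64_t wrong = (uint64_t)DEMO_UPD_QP_MASK_VSD;

    	/* Fixed form, as in the hunk above: shift first, so only the
    	 * intended bit is set. */
    	uint64_t right = 1ULL << DEMO_UPD_QP_MASK_VSD;

    	printf("wrong=0x%016" PRIx64 " right=0x%016" PRIx64 "\n",
    	       wrong, right);
    	return 0;
    }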