Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--  drivers/net/mlx4/cq.c         |  11
-rw-r--r--  drivers/net/mlx4/en_cq.c      |  13
-rw-r--r--  drivers/net/mlx4/en_main.c    |  11
-rw-r--r--  drivers/net/mlx4/en_netdev.c  |  56
-rw-r--r--  drivers/net/mlx4/en_params.c  | 107
-rw-r--r--  drivers/net/mlx4/en_rx.c      |   9
-rw-r--r--  drivers/net/mlx4/en_tx.c      |  29
-rw-r--r--  drivers/net/mlx4/eq.c         | 121
-rw-r--r--  drivers/net/mlx4/main.c       |  53
-rw-r--r--  drivers/net/mlx4/mcg.c        |  25
-rw-r--r--  drivers/net/mlx4/mlx4.h       |  14
-rw-r--r--  drivers/net/mlx4/mlx4_en.h    |  18
-rw-r--r--  drivers/net/mlx4/profile.c    |   4
13 files changed, 268 insertions(+), 203 deletions(-)
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index b7ad2829d67..ac57b6a42c6 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -189,7 +189,7 @@ EXPORT_SYMBOL_GPL(mlx4_cq_resize);
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- int collapsed)
+ unsigned vector, int collapsed)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
@@ -198,6 +198,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
u64 mtt_addr;
int err;
+ if (vector >= dev->caps.num_comp_vectors)
+ return -EINVAL;
+
+ cq->vector = vector;
+
cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
if (cq->cqn == -1)
return -ENOMEM;
@@ -227,7 +232,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
cq_context->flags = cpu_to_be32(!!collapsed << 18);
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
- cq_context->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn;
+ cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -276,7 +281,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
- synchronize_irq(priv->eq_table.eq[MLX4_EQ_COMP].irq);
+ synchronize_irq(priv->eq_table.eq[cq->vector].irq);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
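
[Illustrative aside, not part of the patch: mlx4_cq_alloc() now takes an explicit completion vector and rejects indices >= dev->caps.num_comp_vectors with -EINVAL. A minimal caller sketch using a hypothetical helper name; the real in-tree update is the en_cq.c hunk below.]

static int example_create_ring_cq(struct mlx4_dev *dev, int ring, int nent,
                                  struct mlx4_mtt *mtt, struct mlx4_uar *uar,
                                  u64 db_rec, struct mlx4_cq *cq)
{
        /* Spread rings round-robin over the completion vectors, as the
         * mlx4_en RX path does; the caller is assumed to have set up the
         * MTT, UAR and doorbell record already. */
        unsigned vector = ring % dev->caps.num_comp_vectors;

        return mlx4_cq_alloc(dev, nent, mtt, uar, db_rec, cq,
                             vector, 0 /* not collapsed */);
}
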
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 1368a8010af..91f50de84be 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -51,10 +51,13 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int err;
cq->size = entries;
- if (mode == RX)
+ if (mode == RX) {
cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
- else
+ cq->vector = ring % mdev->dev->caps.num_comp_vectors;
+ } else {
cq->buf_size = sizeof(struct mlx4_cqe);
+ cq->vector = 0;
+ }
cq->ring = ring;
cq->is_tx = mode;
@@ -68,6 +71,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
err = mlx4_en_map_buffer(&cq->wqres.buf);
if (err)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+ else
+ cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
return err;
}
@@ -82,11 +87,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
cq->mcq.arm_db = cq->wqres.db.db + 1;
*cq->mcq.set_ci_db = 0;
*cq->mcq.arm_db = 0;
- cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
memset(cq->buf, 0, cq->buf_size);
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
- cq->wqres.db.dma, &cq->mcq, cq->is_tx);
+ cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
if (err)
return err;
@@ -136,7 +140,6 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
- cq->armed = 1;
mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
&priv->mdev->uar_lock);
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 4b9794e97a7..eda72dd2120 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -169,13 +169,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
mlx4_info(mdev, "Using %d tx rings for port:%d\n",
mdev->profile.prof[i].tx_ring_num, i);
- if (!mdev->profile.prof[i].rx_ring_num) {
- mdev->profile.prof[i].rx_ring_num = 1;
- mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
- 1, i);
- } else
- mlx4_info(mdev, "Using %d rx rings for port:%d\n",
- mdev->profile.prof[i].rx_ring_num, i);
+ mdev->profile.prof[i].rx_ring_num =
+ min_t(int, dev->caps.num_comp_vectors, MAX_RX_RINGS);
+ mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
+ mdev->profile.prof[i].rx_ring_num, i);
}
/* Create our own workqueue for reset/multicast tasks
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 96e709d6440..15bb38d9930 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -369,7 +369,6 @@ static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
- struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq;
int i;
@@ -379,15 +378,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
* satisfy our coelsing target.
* - moder_time is set to a fixed value.
*/
- priv->rx_frames = (mdev->profile.rx_moder_cnt ==
- MLX4_EN_AUTO_CONF) ?
- MLX4_EN_RX_COAL_TARGET /
- priv->dev->mtu + 1 :
- mdev->profile.rx_moder_cnt;
- priv->rx_usecs = (mdev->profile.rx_moder_time ==
- MLX4_EN_AUTO_CONF) ?
- MLX4_EN_RX_COAL_TIME :
- mdev->profile.rx_moder_time;
+ priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->mtu + 1;
+ priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
"rx_frames:%d rx_usecs:%d\n",
priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
@@ -411,7 +403,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
- priv->adaptive_rx_coal = mdev->profile.auto_moder;
+ priv->adaptive_rx_coal = 1;
priv->last_moder_time = MLX4_EN_AUTO_CONF;
priv->last_moder_jiffies = 0;
priv->last_moder_packets = 0;
@@ -560,7 +552,7 @@ static void mlx4_en_linkstate(struct work_struct *work)
}
-static int mlx4_en_start_port(struct net_device *dev)
+int mlx4_en_start_port(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -715,7 +707,7 @@ cq_err:
}
-static void mlx4_en_stop_port(struct net_device *dev)
+void mlx4_en_stop_port(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -834,7 +826,7 @@ static int mlx4_en_close(struct net_device *dev)
return 0;
}
-static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
@@ -853,7 +845,7 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
}
}
-static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile *prof = priv->prof;
@@ -953,6 +945,23 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static const struct net_device_ops mlx4_netdev_ops = {
+ .ndo_open = mlx4_en_open,
+ .ndo_stop = mlx4_en_close,
+ .ndo_start_xmit = mlx4_en_xmit,
+ .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_set_multicast_list = mlx4_en_set_multicast,
+ .ndo_set_mac_address = mlx4_en_set_mac,
+ .ndo_change_mtu = mlx4_en_change_mtu,
+ .ndo_tx_timeout = mlx4_en_tx_timeout,
+ .ndo_vlan_rx_register = mlx4_en_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mlx4_en_netpoll,
+#endif
+};
+
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof)
{
@@ -1029,22 +1038,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/*
* Initialize netdev entry points
*/
-
- dev->open = &mlx4_en_open;
- dev->stop = &mlx4_en_close;
- dev->hard_start_xmit = &mlx4_en_xmit;
- dev->get_stats = &mlx4_en_get_stats;
- dev->set_multicast_list = &mlx4_en_set_multicast;
- dev->set_mac_address = &mlx4_en_set_mac;
- dev->change_mtu = &mlx4_en_change_mtu;
- dev->tx_timeout = &mlx4_en_tx_timeout;
+ dev->netdev_ops = &mlx4_netdev_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
- dev->vlan_rx_register = mlx4_en_vlan_rx_register;
- dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
- dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = mlx4_en_netpoll;
-#endif
+
SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
/* Set defualt MAC */
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
index 95706ee1c01..cfeef0f1bac 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_params.c
@@ -60,49 +60,26 @@ MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
"Number of LRO sessions per ring or disabled (0)");
/* Priority pausing */
-MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
- "Pause policy on TX: 0 never generate pause frames "
- "1 generate pause frames according to RX buffer threshold");
-MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
- "Pause policy on RX: 0 ignore received pause frames "
- "1 respect received pause frames");
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
" Per priority bit mask");
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
" Per priority bit mask");
-/* Interrupt moderation tunning */
-MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF,
- "Max coalesced descriptors for Rx interrupt moderation");
-MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF,
- "Timeout following last packet for Rx interrupt moderation");
-MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation");
-
-MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
-MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
-
-MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
-MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
-MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
-MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
-
-
int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
struct mlx4_en_profile *params = &mdev->profile;
int i;
- params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
- params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
- params->auto_moder = auto_moder;
params->rss_xor = (rss_xor != 0);
params->rss_mask = rss_mask & 0x1f;
params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
- params->prof[i].rx_pause = pprx;
+ params->prof[i].rx_pause = 1;
params->prof[i].rx_ppp = pfcrx;
- params->prof[i].tx_pause = pptx;
+ params->prof[i].tx_pause = 1;
params->prof[i].tx_ppp = pfctx;
+ params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+ params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
}
if (pfcrx || pfctx) {
params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
@@ -111,32 +88,7 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[1].tx_ring_num = 1;
params->prof[2].tx_ring_num = 1;
}
- params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
- params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
-
- if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
- tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
- params->prof[1].tx_ring_size =
- (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
- MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
-
- if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
- tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
- params->prof[2].tx_ring_size =
- (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
- MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
-
- if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
- rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
- params->prof[1].rx_ring_size =
- (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
- MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
-
- if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
- rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
- params->prof[2].rx_ring_size =
- (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
- MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
+
return 0;
}
@@ -433,6 +385,54 @@ static void mlx4_en_get_pauseparam(struct net_device *dev,
pause->rx_pause = priv->prof->rx_pause;
}
+static int mlx4_en_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ u32 rx_size, tx_size;
+ int port_up = 0;
+ int err = 0;
+
+ if (param->rx_jumbo_pending || param->rx_mini_pending)
+ return -EINVAL;
+
+ rx_size = roundup_pow_of_two(param->rx_pending);
+ rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+ tx_size = roundup_pow_of_two(param->tx_pending);
+ tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+
+ if (rx_size == priv->prof->rx_ring_size &&
+ tx_size == priv->prof->tx_ring_size)
+ return 0;
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev);
+ }
+
+ mlx4_en_free_resources(priv);
+
+ priv->prof->tx_ring_size = tx_size;
+ priv->prof->rx_ring_size = rx_size;
+
+ err = mlx4_en_alloc_resources(priv);
+ if (err) {
+ mlx4_err(mdev, "Failed reallocating port resources\n");
+ goto out;
+ }
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err)
+ mlx4_err(mdev, "Failed starting port\n");
+ }
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
+}
+
static void mlx4_en_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
@@ -472,6 +472,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_pauseparam = mlx4_en_get_pauseparam,
.set_pauseparam = mlx4_en_set_pauseparam,
.get_ringparam = mlx4_en_get_ringparam,
+ .set_ringparam = mlx4_en_set_ringparam,
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
};
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 6232227f56c..c61b0bdca1a 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -443,7 +443,8 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
/* Fill Rx buffers */
ring->full = 0;
}
- if (mlx4_en_fill_rx_buffers(priv))
+ err = mlx4_en_fill_rx_buffers(priv);
+ if (err)
goto err_buffers;
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
@@ -776,8 +777,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
} else
netif_receive_skb(skb);
- dev->last_rx = jiffies;
-
next:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
@@ -815,7 +814,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
if (priv->port_up)
- netif_rx_schedule(cq->dev, &cq->napi);
+ netif_rx_schedule(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
@@ -835,7 +834,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
INC_PERF_COUNTER(priv->pstats.napi_quota);
else {
/* Done for now */
- netif_rx_complete(dev, napi);
+ netif_rx_complete(napi);
mlx4_en_arm_cq(priv, cq);
}
return done;
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 8592f8fb847..ff4d75205c2 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -379,8 +379,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
/* Wakeup Tx queue if this ring stopped it */
if (unlikely(ring->blocked)) {
- if (((u32) (ring->prod - ring->cons) <=
- ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {
+ if ((u32) (ring->prod - ring->cons) <=
+ ring->size - HEADROOM - MAX_DESC_TXBBS) {
/* TODO: support multiqueue netdevs. Currently, we block
* when *any* ring is full. Note that:
@@ -404,14 +404,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
- spin_lock_irq(&ring->comp_lock);
- cq->armed = 0;
+ if (!spin_trylock(&ring->comp_lock))
+ return;
mlx4_en_process_tx_cq(cq->dev, cq);
- if (ring->blocked)
- mlx4_en_arm_cq(priv, cq);
- else
- mod_timer(&cq->timer, jiffies + 1);
- spin_unlock_irq(&ring->comp_lock);
+ mod_timer(&cq->timer, jiffies + 1);
+ spin_unlock(&ring->comp_lock);
}
@@ -424,8 +421,10 @@ void mlx4_en_poll_tx_cq(unsigned long data)
INC_PERF_COUNTER(priv->pstats.tx_poll);
- netif_tx_lock(priv->dev);
- spin_lock_irq(&ring->comp_lock);
+ if (!spin_trylock(&ring->comp_lock)) {
+ mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+ return;
+ }
mlx4_en_process_tx_cq(cq->dev, cq);
inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
@@ -435,8 +434,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
if (inflight && priv->port_up)
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
- spin_unlock_irq(&ring->comp_lock);
- netif_tx_unlock(priv->dev);
+ spin_unlock(&ring->comp_lock);
}
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
@@ -479,7 +477,10 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
- mlx4_en_process_tx_cq(priv->dev, cq);
+ if (spin_trylock(&ring->comp_lock)) {
+ mlx4_en_process_tx_cq(priv->dev, cq);
+ spin_unlock(&ring->comp_lock);
+ }
}
static void *get_frag_ptr(struct sk_buff *skb)
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index de169338cd9..2c19bff7cba 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -243,10 +243,6 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
* least that often.
*/
if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
- /*
- * Conditional on hca_type is OK here because
- * this is a rare case, not the fast path.
- */
eq_set_ci(eq, 0);
set_ci = 0;
}
@@ -266,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
- for (i = 0; i < MLX4_NUM_EQ; ++i)
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
return IRQ_RETVAL(work);
@@ -304,6 +300,17 @@ static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
MLX4_CMD_TIME_CLASS_A);
}
+static int mlx4_num_eq_uar(struct mlx4_dev *dev)
+{
+ /*
+ * Each UAR holds 4 EQ doorbells. To figure out how many UARs
+ * we need to map, take the difference of highest index and
+ * the lowest index we'll use and add 1.
+ */
+ return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+ dev->caps.reserved_eqs / 4 + 1;
+}
+
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -483,9 +490,11 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
if (eq_table->have_irq)
free_irq(dev->pdev->irq, dev);
- for (i = 0; i < MLX4_NUM_EQ; ++i)
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq)
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+
+ kfree(eq_table->irq_names);
}
static int mlx4_map_clr_int(struct mlx4_dev *dev)
@@ -551,57 +560,93 @@ void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
__free_page(priv->eq_table.icm_page);
}
+int mlx4_alloc_eq_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
+ sizeof *priv->eq_table.eq, GFP_KERNEL);
+ if (!priv->eq_table.eq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_free_eq_table(struct mlx4_dev *dev)
+{
+ kfree(mlx4_priv(dev)->eq_table.eq);
+}
+
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
int i;
+ priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map,
+ mlx4_num_eq_uar(dev), GFP_KERNEL);
+ if (!priv->eq_table.uar_map) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+
err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
if (err)
- return err;
+ goto err_out_free;
- for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
+ for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
priv->eq_table.uar_map[i] = NULL;
err = mlx4_map_clr_int(dev);
if (err)
- goto err_out_free;
+ goto err_out_bitmap;
priv->eq_table.clr_mask =
swab32(1 << (priv->eq_table.inta_pin & 31));
priv->eq_table.clr_int = priv->clr_base +
(priv->eq_table.inta_pin < 32 ? 4 : 0);
- err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
- &priv->eq_table.eq[MLX4_EQ_COMP]);
- if (err)
- goto err_out_unmap;
+ priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
+ if (!priv->eq_table.irq_names) {
+ err = -ENOMEM;
+ goto err_out_bitmap;
+ }
+
+ for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
+ err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+ &priv->eq_table.eq[i]);
+ if (err)
+ goto err_out_unmap;
+ }
err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
- &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+ (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
+ &priv->eq_table.eq[dev->caps.num_comp_vectors]);
if (err)
goto err_out_comp;
if (dev->flags & MLX4_FLAG_MSI_X) {
- static const char *eq_name[] = {
- [MLX4_EQ_COMP] = DRV_NAME " (comp)",
- [MLX4_EQ_ASYNC] = DRV_NAME " (async)"
- };
+ static const char async_eq_name[] = "mlx4-async";
+ const char *eq_name;
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+ if (i < dev->caps.num_comp_vectors) {
+ snprintf(priv->eq_table.irq_names + i * 16, 16,
+ "mlx4-comp-%d", i);
+ eq_name = priv->eq_table.irq_names + i * 16;
+ } else
+ eq_name = async_eq_name;
- for (i = 0; i < MLX4_NUM_EQ; ++i) {
err = request_irq(priv->eq_table.eq[i].irq,
- mlx4_msi_x_interrupt,
- 0, eq_name[i], priv->eq_table.eq + i);
+ mlx4_msi_x_interrupt, 0, eq_name,
+ priv->eq_table.eq + i);
if (err)
goto err_out_async;
priv->eq_table.eq[i].have_irq = 1;
}
-
} else {
err = request_irq(dev->pdev->irq, mlx4_interrupt,
IRQF_SHARED, DRV_NAME, dev);
@@ -612,28 +657,36 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
}
err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
- priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+ priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
- priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
+ priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
- for (i = 0; i < MLX4_NUM_EQ; ++i)
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
eq_set_ci(&priv->eq_table.eq[i], 1);
return 0;
err_out_async:
- mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+ mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
err_out_comp:
- mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);
+ i = dev->caps.num_comp_vectors - 1;
err_out_unmap:
+ while (i >= 0) {
+ mlx4_free_eq(dev, &priv->eq_table.eq[i]);
+ --i;
+ }
mlx4_unmap_clr_int(dev);
mlx4_free_irqs(dev);
-err_out_free:
+err_out_bitmap:
mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+
+err_out_free:
+ kfree(priv->eq_table.uar_map);
+
return err;
}
@@ -643,18 +696,20 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
int i;
mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
- priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+ priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
mlx4_free_irqs(dev);
- for (i = 0; i < MLX4_NUM_EQ; ++i)
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
mlx4_unmap_clr_int(dev);
- for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
+ for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
if (priv->eq_table.uar_map[i])
iounmap(priv->eq_table.uar_map[i]);
mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+
+ kfree(priv->eq_table.uar_map);
}
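
[Illustrative aside, not part of the patch: a worked instance of the mlx4_num_eq_uar() arithmetic added above; the reserved_eqs and num_comp_vectors values are assumptions picked only for the example.]

static int example_num_eq_uar(void)
{
        int reserved_eqs = 6;           /* assumed value */
        int num_comp_vectors = 4;       /* assumed value */

        /*
         * EQ indices reserved_eqs .. reserved_eqs + num_comp_vectors are
         * in use (6..10 here: four completion EQs plus the async EQ), and
         * each UAR page holds four EQ doorbells, so indices 6..10 span
         * UAR pages 1 and 2.
         */
        return (num_comp_vectors + 1 + reserved_eqs) / 4 -
               reserved_eqs / 4 + 1;    /* = 2 UAR pages to map */
}
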
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 90a0281d15e..710c79e7a2d 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -421,9 +421,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
((u64) (MLX4_CMPT_TYPE_EQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz,
- roundup_pow_of_two(MLX4_NUM_EQ +
- dev->caps.reserved_eqs),
- MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0);
+ dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
if (err)
goto err_cq;
@@ -810,12 +808,12 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (dev->flags & MLX4_FLAG_MSI_X) {
mlx4_warn(dev, "NOP command failed to generate MSI-X "
"interrupt IRQ %d).\n",
- priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
+ priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
mlx4_warn(dev, "Trying again without MSI-X.\n");
} else {
mlx4_err(dev, "NOP command failed to generate interrupt "
"(IRQ %d), aborting.\n",
- priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
+ priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
@@ -908,31 +906,50 @@ err_uar_table_free:
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- struct msix_entry entries[MLX4_NUM_EQ];
+ struct msix_entry *entries;
+ int nreq;
int err;
int i;
if (msi_x) {
- for (i = 0; i < MLX4_NUM_EQ; ++i)
+ nreq = min(dev->caps.num_eqs - dev->caps.reserved_eqs,
+ num_possible_cpus() + 1);
+ entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
+ if (!entries)
+ goto no_msi;
+
+ for (i = 0; i < nreq; ++i)
entries[i].entry = i;
- err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
+ retry:
+ err = pci_enable_msix(dev->pdev, entries, nreq);
if (err) {
- if (err > 0)
- mlx4_info(dev, "Only %d MSI-X vectors available, "
- "not using MSI-X\n", err);
+ /* Try again if at least 2 vectors are available */
+ if (err > 1) {
+ mlx4_info(dev, "Requested %d vectors, "
+ "but only %d MSI-X vectors available, "
+ "trying again\n", nreq, err);
+ nreq = err;
+ goto retry;
+ }
+
goto no_msi;
}
- for (i = 0; i < MLX4_NUM_EQ; ++i)
+ dev->caps.num_comp_vectors = nreq - 1;
+ for (i = 0; i < nreq; ++i)
priv->eq_table.eq[i].irq = entries[i].vector;
dev->flags |= MLX4_FLAG_MSI_X;
+
+ kfree(entries);
return;
}
no_msi:
- for (i = 0; i < MLX4_NUM_EQ; ++i)
+ dev->caps.num_comp_vectors = 1;
+
+ for (i = 0; i < 2; ++i)
priv->eq_table.eq[i].irq = dev->pdev->irq;
}
@@ -1074,6 +1091,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_cmd;
+ err = mlx4_alloc_eq_table(dev);
+ if (err)
+ goto err_close;
+
mlx4_enable_msi_x(dev);
err = mlx4_setup_hca(dev);
@@ -1084,7 +1105,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (err)
- goto err_close;
+ goto err_free_eq;
for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port);
@@ -1114,6 +1135,9 @@ err_port:
mlx4_cleanup_pd_table(dev);
mlx4_cleanup_uar_table(dev);
+err_free_eq:
+ mlx4_free_eq_table(dev);
+
err_close:
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
@@ -1177,6 +1201,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
iounmap(priv->kar);
mlx4_uar_free(dev, &priv->driver_uar);
mlx4_cleanup_uar_table(dev);
+ mlx4_free_eq_table(dev);
mlx4_close_hca(dev);
mlx4_cmd_cleanup(dev);
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 592c01ae2c5..6053c357a47 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -118,17 +118,7 @@ static int find_mgm(struct mlx4_dev *dev,
return err;
if (0)
- mlx4_dbg(dev, "Hash for %04x:%04x:%04x:%04x:"
- "%04x:%04x:%04x:%04x is %04x\n",
- be16_to_cpu(((__be16 *) gid)[0]),
- be16_to_cpu(((__be16 *) gid)[1]),
- be16_to_cpu(((__be16 *) gid)[2]),
- be16_to_cpu(((__be16 *) gid)[3]),
- be16_to_cpu(((__be16 *) gid)[4]),
- be16_to_cpu(((__be16 *) gid)[5]),
- be16_to_cpu(((__be16 *) gid)[6]),
- be16_to_cpu(((__be16 *) gid)[7]),
- *hash);
+ mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);
*index = *hash;
*prev = -1;
@@ -215,7 +205,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (block_mcast_loopback)
mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
- (1 << MGM_BLCK_LB_BIT));
+ (1U << MGM_BLCK_LB_BIT));
else
mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
@@ -277,16 +267,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
goto out;
if (index == -1) {
- mlx4_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
- "not found\n",
- be16_to_cpu(((__be16 *) gid)[0]),
- be16_to_cpu(((__be16 *) gid)[1]),
- be16_to_cpu(((__be16 *) gid)[2]),
- be16_to_cpu(((__be16 *) gid)[3]),
- be16_to_cpu(((__be16 *) gid)[4]),
- be16_to_cpu(((__be16 *) gid)[5]),
- be16_to_cpu(((__be16 *) gid)[6]),
- be16_to_cpu(((__be16 *) gid)[7]));
+ mlx4_err(dev, "MGID %pI6 not found\n", gid);
err = -EINVAL;
goto out;
}
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 34c909deaff..e0213bad61c 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -63,12 +63,6 @@ enum {
};
enum {
- MLX4_EQ_ASYNC,
- MLX4_EQ_COMP,
- MLX4_NUM_EQ
-};
-
-enum {
MLX4_NUM_PDS = 1 << 15
};
@@ -205,10 +199,11 @@ struct mlx4_cq_table {
struct mlx4_eq_table {
struct mlx4_bitmap bitmap;
+ char *irq_names;
void __iomem *clr_int;
- void __iomem *uar_map[(MLX4_NUM_EQ + 6) / 4];
+ void __iomem **uar_map;
u32 clr_mask;
- struct mlx4_eq eq[MLX4_NUM_EQ];
+ struct mlx4_eq *eq;
u64 icm_virt;
struct page *icm_page;
dma_addr_t icm_dma;
@@ -328,6 +323,9 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
int mlx4_reset(struct mlx4_dev *dev);
+int mlx4_alloc_eq_table(struct mlx4_dev *dev);
+void mlx4_free_eq_table(struct mlx4_dev *dev);
+
int mlx4_init_pd_table(struct mlx4_dev *dev);
int mlx4_init_uar_table(struct mlx4_dev *dev);
int mlx4_init_mr_table(struct mlx4_dev *dev);
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 98ddc0811f9..2e96c7b2180 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -58,17 +58,17 @@
#define mlx4_dbg(mlevel, priv, format, arg...) \
if (NETIF_MSG_##mlevel & priv->msg_enable) \
printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
- (&priv->mdev->pdev->dev)->bus_id , ## arg)
+ (dev_name(&priv->mdev->pdev->dev)) , ## arg)
#define mlx4_err(mdev, format, arg...) \
printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
- (&mdev->pdev->dev)->bus_id , ## arg)
+ (dev_name(&mdev->pdev->dev)) , ## arg)
#define mlx4_info(mdev, format, arg...) \
printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
- (&mdev->pdev->dev)->bus_id , ## arg)
+ (dev_name(&mdev->pdev->dev)) , ## arg)
#define mlx4_warn(mdev, format, arg...) \
printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
- (&mdev->pdev->dev)->bus_id , ## arg)
+ (dev_name(&mdev->pdev->dev)) , ## arg)
/*
* Device constants
@@ -311,7 +311,6 @@ struct mlx4_en_cq {
enum cq_type is_tx;
u16 moder_time;
u16 moder_cnt;
- int armed;
struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e
};
@@ -334,9 +333,6 @@ struct mlx4_en_profile {
u8 rss_mask;
u32 active_ports;
u32 small_pkt_int;
- int rx_moder_cnt;
- int rx_moder_time;
- int auto_moder;
u8 no_reset;
struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};
@@ -493,6 +489,12 @@ void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof);
+int mlx4_en_start_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev);
+
+void mlx4_en_free_resources(struct mlx4_en_priv *priv);
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+
int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 9ca42b213d5..919fb9eb1b6 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -107,7 +107,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
- profile[MLX4_RES_EQ].num = MLX4_NUM_EQ + dev_cap->reserved_eqs;
+ profile[MLX4_RES_EQ].num = min(dev_cap->max_eqs,
+ dev_cap->reserved_eqs +
+ num_possible_cpus() + 1);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = request->num_mtt;
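
[Illustrative aside, not part of the patch: how the sizing in this series chains together on an assumed 8-CPU system; MAX_RX_RINGS is not shown in this diff, so it is only assumed to be >= 8.]

        profile.c:        EQs requested     = min(max_eqs, reserved_eqs + num_possible_cpus() + 1)
        main.c (MSI-X):   vectors requested = min(num_eqs - reserved_eqs, num_possible_cpus() + 1) = 9,
                          retried with fewer if the platform grants fewer (but at least 2)
                          num_comp_vectors  = vectors - 1 = 8   (one EQ stays dedicated to async events)
        en_main.c:        rx_ring_num       = min(num_comp_vectors, MAX_RX_RINGS) = 8
        en_cq.c:          cq->vector        = ring % num_comp_vectors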