Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r-- | drivers/net/mlx4/Makefile | 2
-rw-r--r-- | drivers/net/mlx4/en_cq.c | 7
-rw-r--r-- | drivers/net/mlx4/en_ethtool.c (renamed from drivers/net/mlx4/en_params.c) | 67
-rw-r--r-- | drivers/net/mlx4/en_main.c | 68
-rw-r--r-- | drivers/net/mlx4/en_netdev.c | 201
-rw-r--r-- | drivers/net/mlx4/en_rx.c | 139
-rw-r--r-- | drivers/net/mlx4/en_tx.c | 120
-rw-r--r-- | drivers/net/mlx4/eq.c | 8
-rw-r--r-- | drivers/net/mlx4/main.c | 14
-rw-r--r-- | drivers/net/mlx4/mlx4_en.h | 49
-rw-r--r-- | drivers/net/mlx4/mr.c | 13
-rw-r--r-- | drivers/net/mlx4/profile.c | 2
12 files changed, 331 insertions, 359 deletions
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 21040a0d81f..1fd068e1d93 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -5,5 +5,5 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
 
 obj-$(CONFIG_MLX4_EN) += mlx4_en.o
 
-mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
+mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
 		en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 91f50de84be..21786ad4455 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -89,6 +89,9 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	*cq->mcq.arm_db = 0;
 	memset(cq->buf, 0, cq->buf_size);
 
+	if (!cq->is_tx)
+		cq->size = priv->rx_ring[cq->ring].actual_size;
+
 	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
 			    cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
 	if (err)
@@ -125,8 +128,10 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 
 	if (cq->is_tx)
 		del_timer(&cq->timer);
-	else
+	else {
 		napi_disable(&cq->napi);
+		netif_napi_del(&cq->napi);
+	}
 
 	mlx4_cq_free(mdev->dev, &cq->mcq);
 }
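The en_cq.c hunks above make two fixes: an RX CQ is now sized from the ring's actual_size, which may have been reduced if buffer allocation fell short (see the en_rx.c changes below), and the NAPI context is unregistered with netif_napi_del() when the CQ is deactivated, so a later reactivation can call netif_napi_add() again without leaking the old entry on the device's NAPI list. A minimal sketch of the paired setup/teardown this implies; the function names and the weight of 64 here are illustrative, not from the patch:

#include <linux/netdevice.h>

static void example_activate(struct net_device *dev, struct napi_struct *napi,
			     int (*poll)(struct napi_struct *, int))
{
	netif_napi_add(dev, napi, poll, 64);	/* register poll handler */
	napi_enable(napi);
}

static void example_deactivate(struct napi_struct *napi)
{
	napi_disable(napi);	/* wait for any in-flight poll to finish */
	netif_napi_del(napi);	/* unlink from the device's napi_list */
}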
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_ethtool.c
index c1bd040b9e0..091f99052c9 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -38,64 +38,6 @@
 #include "mlx4_en.h"
 #include "en_port.h"
 
-#define MLX4_EN_PARM_INT(X, def_val, desc) \
-	static unsigned int X = def_val;\
-	module_param(X , uint, 0444); \
-	MODULE_PARM_DESC(X, desc);
-
-
-/*
- * Device scope module parameters
- */
-
-
-/* Use a XOR rathern than Toeplitz hash function for RSS */
-MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
-
-/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
-MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
-
-/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
-MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
-		 "Number of LRO sessions per ring or disabled (0)");
-
-/* Priority pausing */
-MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
-			   " Per priority bit mask");
-MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
-			   " Per priority bit mask");
-
-int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-{
-	struct mlx4_en_profile *params = &mdev->profile;
-	int i;
-
-	params->rss_xor = (rss_xor != 0);
-	params->rss_mask = rss_mask & 0x1f;
-	params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
-	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
-		params->prof[i].rx_pause = 1;
-		params->prof[i].rx_ppp = pfcrx;
-		params->prof[i].tx_pause = 1;
-		params->prof[i].tx_ppp = pfctx;
-		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
-		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
-	}
-	if (pfcrx || pfctx) {
-		params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
-		params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
-	} else {
-		params->prof[1].tx_ring_num = 1;
-		params->prof[2].tx_ring_num = 1;
-	}
-
-	return 0;
-}
-
-
-/*
- * Ethtool support
- */
 
 static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
 {
@@ -326,8 +268,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
 
 	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
-				MLX4_EN_RX_COAL_TARGET /
-				priv->dev->mtu + 1 :
+				MLX4_EN_RX_COAL_TARGET :
 				coal->rx_max_coalesced_frames;
 	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
@@ -371,7 +312,7 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 				    priv->prof->rx_pause,
 				    priv->prof->rx_ppp);
 	if (err)
-		mlx4_err(mdev, "Failed setting pause params to\n");
+		en_err(priv, "Failed setting pause params\n");
 
 	return err;
 }
@@ -421,13 +362,13 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 
 	err = mlx4_en_alloc_resources(priv);
 	if (err) {
-		mlx4_err(mdev, "Failed reallocating port resources\n");
+		en_err(priv, "Failed reallocating port resources\n");
 		goto out;
 	}
 	if (port_up) {
 		err = mlx4_en_start_port(dev);
 		if (err)
-			mlx4_err(mdev, "Failed starting port\n");
+			en_err(priv, "Failed starting port\n");
 	}
 
 out:
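With MLX4_EN_RX_COAL_TARGET redefined from a byte count (0x20000) to a packet count (44, see the mlx4_en.h hunk further down), the auto-configured rx_frames no longer depends on the MTU. A small userspace sketch contrasting the two formulas, using the old and new values of the define:

#include <stdio.h>

#define OLD_RX_COAL_TARGET 0x20000	/* bytes */
#define NEW_RX_COAL_TARGET 44		/* packets */

int main(void)
{
	int mtus[] = { 1500, 9000 };

	for (int i = 0; i < 2; i++) {
		int mtu = mtus[i];
		/* old scheme: packets needed to reach a byte target */
		int old_frames = OLD_RX_COAL_TARGET / mtu + 1;
		/* new scheme: fixed packet target, independent of MTU */
		int new_frames = NEW_RX_COAL_TARGET;

		printf("mtu %4d: old rx_frames=%3d new rx_frames=%d\n",
		       mtu, old_frames, new_frames);
	}
	return 0;
}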
+ " Per priority bit mask"); + +static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) +{ + struct mlx4_en_profile *params = &mdev->profile; + int i; + + params->rss_xor = (rss_xor != 0); + params->rss_mask = rss_mask & 0x1f; + params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS); + for (i = 1; i <= MLX4_MAX_PORTS; i++) { + params->prof[i].rx_pause = 1; + params->prof[i].rx_ppp = pfcrx; + params->prof[i].tx_pause = 1; + params->prof[i].tx_ppp = pfctx; + params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; + params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; + params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS + + (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS; + } + + return 0; +} + static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, enum mlx4_dev_event event, int port) { @@ -194,28 +243,11 @@ static void *mlx4_en_add(struct mlx4_dev *dev) /* Create a netdev for each port */ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { mlx4_info(mdev, "Activating port:%d\n", i); - if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) { + if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) mdev->pndev[i] = NULL; - goto err_free_netdev; - } } return mdev; - -err_free_netdev: - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { - if (mdev->pndev[i]) - mlx4_en_destroy_netdev(mdev->pndev[i]); - } - - mutex_lock(&mdev->state_lock); - mdev->device_up = false; - mutex_unlock(&mdev->state_lock); - flush_workqueue(mdev->workqueue); - - /* Stop event queue before we drop down to release shared SW state */ - destroy_workqueue(mdev->workqueue); - err_mr: mlx4_mr_free(dev, &mdev->mr); err_uar: diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 7bcc49de163..e02bafdd368 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c @@ -51,14 +51,14 @@ static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group * struct mlx4_en_dev *mdev = priv->mdev; int err; - mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp); + en_dbg(HW, priv, "Registering VLAN group:%p\n", grp); priv->vlgrp = grp; mutex_lock(&mdev->state_lock); if (mdev->device_up && priv->port_up) { err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp); if (err) - mlx4_err(mdev, "Failed configuring VLAN filter\n"); + en_err(priv, "Failed configuring VLAN filter\n"); } mutex_unlock(&mdev->state_lock); } @@ -72,15 +72,15 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) if (!priv->vlgrp) return; - mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n", - vid, vlan_group_get_device(priv->vlgrp, vid)); + en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n", + vid, vlan_group_get_device(priv->vlgrp, vid)); /* Add VID to port VLAN filter */ mutex_lock(&mdev->state_lock); if (mdev->device_up && priv->port_up) { err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); if (err) - mlx4_err(mdev, "Failed configuring VLAN filter\n"); + en_err(priv, "Failed configuring VLAN filter\n"); } mutex_unlock(&mdev->state_lock); } @@ -94,9 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) if (!priv->vlgrp) return; - mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp " - "entry:%p)\n", vid, priv->vlgrp, - vlan_group_get_device(priv->vlgrp, vid)); + en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n", + vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid)); vlan_group_set_device(priv->vlgrp, vid, NULL); /* Remove VID from port VLAN filter */ @@ -104,7 +103,7 @@ static void 
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 7bcc49de163..e02bafdd368 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -51,14 +51,14 @@ static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
 
-	mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
+	en_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
 	priv->vlgrp = grp;
 
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -72,15 +72,15 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 	if (!priv->vlgrp)
 		return;
 
-	mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
-		 vid, vlan_group_get_device(priv->vlgrp, vid));
+	en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
+	       vid, vlan_group_get_device(priv->vlgrp, vid));
 
 	/* Add VID to port VLAN filter */
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -94,9 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	if (!priv->vlgrp)
 		return;
 
-	mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
-		 "entry:%p)\n", vid, priv->vlgrp,
-		 vlan_group_get_device(priv->vlgrp, vid));
+	en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n",
+	       vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid));
 	vlan_group_set_device(priv->vlgrp, vid, NULL);
 
 	/* Remove VID from port VLAN filter */
@@ -104,7 +103,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -150,9 +149,10 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
 		err = mlx4_register_mac(mdev->dev, priv->port,
 					priv->mac, &priv->mac_index);
 		if (err)
-			mlx4_err(mdev, "Failed changing HW MAC address\n");
+			en_err(priv, "Failed changing HW MAC address\n");
 	} else
-		mlx4_dbg(HW, priv, "Port is down, exiting...\n");
+		en_dbg(HW, priv, "Port is down while "
+				 "registering mac, exiting...\n");
 
 	mutex_unlock(&mdev->state_lock);
 }
@@ -174,7 +174,6 @@ static void mlx4_en_clear_list(struct net_device *dev)
 static void mlx4_en_cache_mclist(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct dev_mc_list *mclist;
 	struct dev_mc_list *tmp;
 	struct dev_mc_list *plist = NULL;
@@ -182,7 +181,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
 	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
 		tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
 		if (!tmp) {
-			mlx4_err(mdev, "failed to allocate multicast list\n");
+			en_err(priv, "failed to allocate multicast list\n");
 			mlx4_en_clear_list(dev);
 			return;
 		}
@@ -219,13 +218,13 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 
 	mutex_lock(&mdev->state_lock);
 	if (!mdev->device_up) {
-		mlx4_dbg(HW, priv, "Card is not up, ignoring "
-				   "multicast change.\n");
+		en_dbg(HW, priv, "Card is not up, "
+				 "ignoring multicast change.\n");
 		goto out;
 	}
 	if (!priv->port_up) {
-		mlx4_dbg(HW, priv, "Port is down, ignoring "
-				   "multicast change.\n");
+		en_dbg(HW, priv, "Port is down, "
+				 "ignoring multicast change.\n");
 		goto out;
 	}
@@ -236,29 +235,27 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 	if (dev->flags & IFF_PROMISC) {
 		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
 			if (netif_msg_rx_status(priv))
-				mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
-					  priv->port);
+				en_warn(priv, "Entering promiscuous mode\n");
 			priv->flags |= MLX4_EN_FLAG_PROMISC;
 
 			/* Enable promiscouos mode */
 			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
 						     priv->base_qpn, 1);
 			if (err)
-				mlx4_err(mdev, "Failed enabling "
-					 "promiscous mode\n");
+				en_err(priv, "Failed enabling "
+					     "promiscous mode\n");
 
 			/* Disable port multicast filter (unconditionally) */
 			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
 						  0, 0, MLX4_MCAST_DISABLE);
 			if (err)
-				mlx4_err(mdev, "Failed disabling "
-					 "multicast filter\n");
+				en_err(priv, "Failed disabling "
+					     "multicast filter\n");
 
 			/* Disable port VLAN filter */
 			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
 			if (err)
-				mlx4_err(mdev, "Failed disabling "
-					 "VLAN filter\n");
+				en_err(priv, "Failed disabling VLAN filter\n");
 		}
 		goto out;
 	}
@@ -269,20 +266,19 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 
 	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
 		if (netif_msg_rx_status(priv))
-			mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
-				  priv->port);
+			en_warn(priv, "Leaving promiscuous mode\n");
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
 		/* Disable promiscouos mode */
 		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
 					     priv->base_qpn, 0);
 		if (err)
-			mlx4_err(mdev, "Failed disabling promiscous mode\n");
+			en_err(priv, "Failed disabling promiscous mode\n");
 
 		/* Enable port VLAN filter */
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed enabling VLAN filter\n");
+			en_err(priv, "Failed enabling VLAN filter\n");
 	}
 
 	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
@@ -290,12 +286,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
-			mlx4_err(mdev, "Failed disabling multicast filter\n");
+			en_err(priv, "Failed disabling multicast filter\n");
 	} else {
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
-			mlx4_err(mdev, "Failed disabling multicast filter\n");
+			en_err(priv, "Failed disabling multicast filter\n");
 
 		/* Flush mcast filter and init it with broadcast address */
 		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
@@ -314,7 +310,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_ENABLE);
 		if (err)
-			mlx4_err(mdev, "Failed enabling multicast filter\n");
+			en_err(priv, "Failed enabling multicast filter\n");
 
 		mlx4_en_clear_list(dev);
 	}
@@ -346,10 +342,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 	struct mlx4_en_dev *mdev = priv->mdev;
 
 	if (netif_msg_timer(priv))
-		mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
+		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
 
 	priv->port_stats.tx_timeout++;
-	mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
+	en_dbg(DRV, priv, "Scheduling watchdog\n");
 	queue_work(mdev->workqueue, &priv->watchdog_task);
 }
@@ -371,15 +367,15 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	int i;
 
 	/* If we haven't received a specific coalescing setting
-	 * (module param), we set the moderation paramters as follows:
+	 * (module param), we set the moderation parameters as follows:
 	 * - moder_cnt is set to the number of mtu sized packets to
 	 *   satisfy our coelsing target.
 	 * - moder_time is set to a fixed value.
 	 */
-	priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->mtu + 1;
+	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
 	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
-	mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
-			     "rx_frames:%d rx_usecs:%d\n",
+	en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
+			   "rx_frames:%d rx_usecs:%d\n",
 		 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
 
 	/* Setup cq moderation params */
@@ -412,7 +408,6 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 {
 	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_cq *cq;
 	unsigned long packets;
 	unsigned long rate;
@@ -472,11 +467,11 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 		moder_time = priv->rx_usecs;
 	}
 
-	mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
-		 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
+	en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
+	       tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
 
-	mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
-		 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
+	en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
+	       "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
 		 priv->last_moder_time, moder_time, period, packets,
 		 avg_pkt_size, rate);
@@ -487,8 +482,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 			cq->moder_time = moder_time;
 			err = mlx4_en_set_cq_moder(priv, cq);
 			if (err) {
-				mlx4_err(mdev, "Failed modifying moderation for cq:%d "
-					 "on port:%d\n", i, priv->port);
+				en_err(priv, "Failed modifying moderation for cq:%d\n", i);
 				break;
 			}
 		}
@@ -511,8 +505,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
 
 	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
 	if (err)
-		mlx4_dbg(HW, priv, "Could not update stats for "
-				   "port:%d\n", priv->port);
+		en_dbg(HW, priv, "Could not update stats \n");
 
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up) {
@@ -536,12 +529,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
 	 * report to system log */
 	if (priv->last_link_state != linkstate) {
 		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
-			if (netif_msg_link(priv))
-				mlx4_info(mdev, "Port %d - link down\n", priv->port);
+			en_dbg(LINK, priv, "Link Down\n");
 			netif_carrier_off(priv->dev);
 		} else {
-			if (netif_msg_link(priv))
-				mlx4_info(mdev, "Port %d - link up\n", priv->port);
+			en_dbg(LINK, priv, "Link Up\n");
 			netif_carrier_on(priv->dev);
 		}
 	}
@@ -556,58 +547,53 @@ int mlx4_en_start_port(struct net_device *dev)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_cq *cq;
 	struct mlx4_en_tx_ring *tx_ring;
-	struct mlx4_en_rx_ring *rx_ring;
 	int rx_index = 0;
 	int tx_index = 0;
-	u16 stride;
 	int err = 0;
 	int i;
 	int j;
 
 	if (priv->port_up) {
-		mlx4_dbg(DRV, priv, "start port called while port already up\n");
+		en_dbg(DRV, priv, "start port called while port already up\n");
 		return 0;
 	}
 
 	/* Calculate Rx buf size */
 	dev->mtu = min(dev->mtu, priv->max_mtu);
 	mlx4_en_calc_rx_buf(dev);
-	mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
-	stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
-				    DS_SIZE * priv->num_frags);
+	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+
 	/* Configure rx cq's and rings */
+	err = mlx4_en_activate_rx_rings(priv);
+	if (err) {
+		en_err(priv, "Failed to activate RX rings\n");
+		return err;
+	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		cq = &priv->rx_cq[i];
-		rx_ring = &priv->rx_ring[i];
 
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed activating Rx CQ\n");
+			en_err(priv, "Failed activating Rx CQ\n");
 			goto cq_err;
 		}
 		for (j = 0; j < cq->size; j++)
 			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed setting cq moderation parameters");
+			en_err(priv, "Failed setting cq moderation parameters");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
-
+		priv->rx_ring[i].cqn = cq->mcq.cqn;
 		++rx_index;
 	}
 
-	err = mlx4_en_activate_rx_rings(priv);
-	if (err) {
-		mlx4_err(mdev, "Failed to activate RX rings\n");
-		goto cq_err;
-	}
-
 	err = mlx4_en_config_rss_steer(priv);
 	if (err) {
-		mlx4_err(mdev, "Failed configuring rss steering\n");
-		goto rx_err;
+		en_err(priv, "Failed configuring rss steering\n");
+		goto cq_err;
 	}
 
 	/* Configure tx cq's and rings */
@@ -616,16 +602,16 @@ int mlx4_en_start_port(struct net_device *dev)
 		cq = &priv->tx_cq[i];
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed allocating Tx CQ\n");
+			en_err(priv, "Failed allocating Tx CQ\n");
 			goto tx_err;
 		}
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed setting cq moderation parameters");
+			en_err(priv, "Failed setting cq moderation parameters");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
-		mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
+		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
 		cq->buf->wqe_index = cpu_to_be16(0xffff);
 
 		/* Configure ring */
@@ -633,7 +619,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
 					       priv->rx_ring[0].srq.srqn);
 		if (err) {
-			mlx4_err(mdev, "Failed allocating Tx ring\n");
+			en_err(priv, "Failed allocating Tx ring\n");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
@@ -651,30 +637,30 @@ int mlx4_en_start_port(struct net_device *dev)
 				    priv->prof->rx_pause,
 				    priv->prof->rx_ppp);
 	if (err) {
-		mlx4_err(mdev, "Failed setting port general configurations"
-			 " for port %d, with error %d\n", priv->port, err);
+		en_err(priv, "Failed setting port general configurations "
+			     "for port %d, with error %d\n", priv->port, err);
 		goto tx_err;
 	}
 	/* Set default qp number */
 	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
 	if (err) {
-		mlx4_err(mdev, "Failed setting default qp numbers\n");
+		en_err(priv, "Failed setting default qp numbers\n");
 		goto tx_err;
 	}
 	/* Set port mac number */
-	mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
 	err = mlx4_register_mac(mdev->dev, priv->port,
 				priv->mac, &priv->mac_index);
 	if (err) {
-		mlx4_err(mdev, "Failed setting port mac\n");
+		en_err(priv, "Failed setting port mac\n");
 		goto tx_err;
 	}
 
 	/* Init port */
-	mlx4_dbg(HW, priv, "Initializing port\n");
+	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
 	if (err) {
-		mlx4_err(mdev, "Failed Initializing port\n");
+		en_err(priv, "Failed Initializing port\n");
 		goto mac_err;
 	}
@@ -694,12 +680,11 @@ int mlx4_en_start_port(struct net_device *dev)
 	}
 
 	mlx4_en_release_rss_steer(priv);
-rx_err:
-	for (i = 0; i < priv->rx_ring_num; i++)
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
 cq_err:
 	while (rx_index--)
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+	for (i = 0; i < priv->rx_ring_num; i++)
+		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
 
 	return err; /* need to close devices */
 }
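The mlx4_en_start_port() hunks above move RX ring activation ahead of CQ activation (so the CQs can be sized from each ring's actual_size) and reorder the error labels to unwind in the reverse of the new acquisition order. A minimal userspace sketch of the goto-unwind pattern this relies on; the acquire/release names are illustrative only:

#include <stdio.h>

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return -1; /* simulate failure */ }
static void release_a(void) { puts("released a"); }

/* Each failure label releases everything acquired before the failing
 * step, in reverse order - exactly how cq_err/tx_err/mac_err work. */
static int start(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;
	err = acquire_b();
	if (err)
		goto err_a;	/* b failed: undo a only */
	return 0;

err_a:
	release_a();
	return err;
}

int main(void)
{
	return start() ? 1 : 0;
}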
@@ -712,8 +697,7 @@ void mlx4_en_stop_port(struct net_device *dev)
 	int i;
 
 	if (!priv->port_up) {
-		mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
-			 priv->port);
+		en_dbg(DRV, priv, "stop port called while port already down\n");
 		return;
 	}
 	netif_stop_queue(dev);
@@ -758,13 +742,13 @@ static void mlx4_en_restart(struct work_struct *work)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
 
-	mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		mlx4_en_stop_port(dev);
 		if (mlx4_en_start_port(dev))
-			mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+			en_err(priv, "Failed restarting port %d\n", priv->port);
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -780,14 +764,14 @@ static int mlx4_en_open(struct net_device *dev)
 
 	mutex_lock(&mdev->state_lock);
 
 	if (!mdev->device_up) {
-		mlx4_err(mdev, "Cannot open - device down/disabled\n");
+		en_err(priv, "Cannot open - device down/disabled\n");
 		err = -EBUSY;
 		goto out;
 	}
 
 	/* Reset HW statistics and performance counters */
 	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
-		mlx4_dbg(HW, priv, "Failed dumping statistics\n");
+		en_dbg(HW, priv, "Failed dumping statistics\n");
 
 	memset(&priv->stats, 0, sizeof(priv->stats));
 	memset(&priv->pstats, 0, sizeof(priv->pstats));
@@ -804,7 +788,7 @@ static int mlx4_en_open(struct net_device *dev)
 	mlx4_en_set_default_moderation(priv);
 	err = mlx4_en_start_port(dev);
 	if (err)
-		mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
+		en_err(priv, "Failed starting port:%d\n", priv->port);
 
 out:
 	mutex_unlock(&mdev->state_lock);
@@ -817,8 +801,7 @@ static int mlx4_en_close(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	if (netif_msg_ifdown(priv))
-		mlx4_info(mdev, "Close called for port:%d\n", priv->port);
+	en_dbg(IFDOWN, priv, "Close port called\n");
 
 	mutex_lock(&mdev->state_lock);
@@ -850,7 +833,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
@@ -879,7 +861,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 	return 0;
 
 err:
-	mlx4_err(mdev, "Failed to allocate NIC resources\n");
+	en_err(priv, "Failed to allocate NIC resources\n");
 	return -ENOMEM;
 }
 
@@ -889,7 +871,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
+	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
 	/* Unregister device - this will close the port if it was up */
 	if (priv->registered)
@@ -918,11 +900,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err = 0;
 
-	mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
 		 dev->mtu, new_mtu);
 
 	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
-		mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
+		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
 		return -EPERM;
 	}
 	dev->mtu = new_mtu;
@@ -932,13 +914,13 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 		if (!mdev->device_up) {
 			/* NIC is probably restarting - let watchdog task reset
 			 * the port */
-			mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
+			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 		} else {
 			mlx4_en_stop_port(dev);
 			mlx4_en_set_default_moderation(priv);
 			err = mlx4_en_start_port(dev);
 			if (err) {
-				mlx4_err(mdev, "Failed restarting port:%d\n",
+				en_err(priv, "Failed restarting port:%d\n",
 					 priv->port);
 				queue_work(mdev->workqueue, &priv->watchdog_task);
 			}
@@ -952,6 +934,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_open		= mlx4_en_open,
 	.ndo_stop		= mlx4_en_close,
 	.ndo_start_xmit		= mlx4_en_xmit,
+	.ndo_select_queue	= mlx4_en_select_queue,
 	.ndo_get_stats		= mlx4_en_get_stats,
 	.ndo_set_multicast_list	= mlx4_en_set_multicast,
 	.ndo_set_mac_address	= mlx4_en_set_mac,
@@ -974,7 +957,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	int i;
 	int err;
 
-	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
 	if (dev == NULL) {
 		mlx4_err(mdev, "Net device allocation failed\n");
 		return -ENOMEM;
@@ -1012,7 +995,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
 	priv->mac = mdev->dev->caps.def_mac[priv->port];
 	if (ILLEGAL_MAC(priv->mac)) {
-		mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
+		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
 			 priv->port, priv->mac);
 		err = -EINVAL;
 		goto out;
@@ -1031,19 +1014,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE,
 				MLX4_EN_PAGE_SIZE);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate page for rx qps\n");
+		en_err(priv, "Failed to allocate page for rx qps\n");
 		goto out;
 	}
 	priv->allocated = 1;
 
-	/* Populate Tx priority mappings */
-	mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
-
 	/*
 	 * Initialize netdev entry points
 	 */
 	dev->netdev_ops = &mlx4_netdev_ops;
 	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
+	dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS;
 
 	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
@@ -1057,7 +1038,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	 * Set driver features
 	 */
 	dev->features |= NETIF_F_SG;
+	dev->vlan_features |= NETIF_F_SG;
 	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	dev->features |= NETIF_F_HIGHDMA;
 	dev->features |= NETIF_F_HW_VLAN_TX |
 			 NETIF_F_HW_VLAN_RX |
@@ -1067,6 +1050,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	if (mdev->LSO_support) {
 		dev->features |= NETIF_F_TSO;
 		dev->features |= NETIF_F_TSO6;
+		dev->vlan_features |= NETIF_F_TSO;
+		dev->vlan_features |= NETIF_F_TSO6;
 	}
 
 	mdev->pndev[port] = dev;
@@ -1074,9 +1059,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	netif_carrier_off(dev);
 	err = register_netdev(dev);
 	if (err) {
-		mlx4_err(mdev, "Netdev registration failed\n");
+		en_err(priv, "Netdev registration failed for port %d\n", port);
 		goto out;
 	}
+
+	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+
 	priv->registered = 1;
 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 	return 0;
en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n", + i, page_alloc->page); } return 0; @@ -136,8 +136,8 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, for (i = 0; i < priv->num_frags; i++) { page_alloc = &ring->page_alloc[i]; - mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", - i, page_count(page_alloc->page)); + en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", + i, page_count(page_alloc->page)); put_page(page_alloc->page); page_alloc->page = NULL; @@ -202,12 +202,34 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); } -static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) +static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, + int index) { struct mlx4_en_dev *mdev = priv->mdev; + struct skb_frag_struct *skb_frags; + struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride); + dma_addr_t dma; + int nr; + + skb_frags = ring->rx_info + (index << priv->log_rx_info); + for (nr = 0; nr < priv->num_frags; nr++) { + en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); + dma = be64_to_cpu(rx_desc->data[nr].addr); + + en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma); + pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, + PCI_DMA_FROMDEVICE); + put_page(skb_frags[nr].page); + } +} + +static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) +{ struct mlx4_en_rx_ring *ring; int ring_ind; int buf_ind; + int new_size; for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { @@ -216,22 +238,34 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) if (mlx4_en_prepare_rx_desc(priv, ring, ring->actual_size)) { if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { - mlx4_err(mdev, "Failed to allocate " - "enough rx buffers\n"); + en_err(priv, "Failed to allocate " + "enough rx buffers\n"); return -ENOMEM; } else { - if (netif_msg_rx_err(priv)) - mlx4_warn(mdev, - "Only %d buffers allocated\n", - ring->actual_size); - goto out; + new_size = rounddown_pow_of_two(ring->actual_size); + en_warn(priv, "Only %d buffers allocated " + "reducing ring size to %d", + ring->actual_size, new_size); + goto reduce_rings; } } ring->actual_size++; ring->prod++; } } -out: + return 0; + +reduce_rings: + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { + ring = &priv->rx_ring[ring_ind]; + while (ring->actual_size > new_size) { + ring->actual_size--; + ring->prod--; + mlx4_en_free_rx_desc(priv, ring, ring->actual_size); + } + ring->size_mask = ring->actual_size - 1; + } + return 0; } @@ -247,15 +281,14 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev, ring->size_mask); if (err) { if (netif_msg_rx_err(priv)) - mlx4_warn(priv->mdev, - "Failed preparing rx descriptor\n"); + en_warn(priv, "Failed preparing rx descriptor\n"); priv->port_stats.rx_alloc_failed++; break; } ++num; ++ring->prod; } - if ((u32) (ring->prod - ring->cons) == ring->size) + if ((u32) (ring->prod - ring->cons) == ring->actual_size) ring->full = 1; return num; @@ -264,33 +297,17 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev, static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { - struct mlx4_en_dev *mdev = priv->mdev; - struct skb_frag_struct *skb_frags; - struct mlx4_en_rx_desc *rx_desc; - dma_addr_t dma; int index; - int nr; - mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n", - ring->cons, ring->prod); + 
en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n", + ring->cons, ring->prod); /* Unmap and free Rx buffers */ - BUG_ON((u32) (ring->prod - ring->cons) > ring->size); + BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); while (ring->cons != ring->prod) { index = ring->cons & ring->size_mask; - rx_desc = ring->buf + (index << ring->log_stride); - skb_frags = ring->rx_info + (index << priv->log_rx_info); - mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index); - - for (nr = 0; nr < priv->num_frags; nr++) { - mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr); - dma = be64_to_cpu(rx_desc->data[nr].addr); - - mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma); - pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, - PCI_DMA_FROMDEVICE); - put_page(skb_frags[nr].page); - } + en_dbg(DRV, priv, "Processing descriptor:%d\n", index); + mlx4_en_free_rx_desc(priv, ring, index); ++ring->cons; } } @@ -354,10 +371,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, sizeof(struct skb_frag_struct)); ring->rx_info = vmalloc(tmp); if (!ring->rx_info) { - mlx4_err(mdev, "Failed allocating rx_info ring\n"); + en_err(priv, "Failed allocating rx_info ring\n"); return -ENOMEM; } - mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", + en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", ring->rx_info, tmp); err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, @@ -367,7 +384,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, err = mlx4_en_map_buffer(&ring->wqres.buf); if (err) { - mlx4_err(mdev, "Failed to map RX buffer\n"); + en_err(priv, "Failed to map RX buffer\n"); goto err_hwq; } ring->buf = ring->wqres.buf.direct.buf; @@ -385,7 +402,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, sizeof(struct net_lro_desc), GFP_KERNEL); if (!ring->lro.lro_arr) { - mlx4_err(mdev, "Failed to allocate lro array\n"); + en_err(priv, "Failed to allocate lro array\n"); goto err_map; } ring->lro.get_frag_header = mlx4_en_get_frag_header; @@ -436,7 +453,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) /* Initialize page allocators */ err = mlx4_en_init_allocator(priv, ring); if (err) { - mlx4_err(mdev, "Failed initializing ring allocator\n"); + en_err(priv, "Failed initializing ring allocator\n"); ring_ind--; goto err_allocator; } @@ -454,7 +471,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) mlx4_en_update_rx_prod_db(ring); /* Configure SRQ representing the ring */ - ring->srq.max = ring->size; + ring->srq.max = ring->actual_size; ring->srq.max_gs = max_gs; ring->srq.wqe_shift = ilog2(ring->stride); @@ -467,7 +484,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt, ring->wqres.db.dma, &ring->srq); if (err){ - mlx4_err(mdev, "Failed to allocate srq\n"); + en_err(priv, "Failed to allocate srq\n"); ring_ind--; goto err_srq; } @@ -582,7 +599,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN); if (!skb) { - mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n"); + en_dbg(RX_ERR, priv, "Failed allocating skb\n"); return NULL; } skb->dev = priv->dev; @@ -661,7 +678,6 @@ static void mlx4_en_copy_desc(struct mlx4_en_priv *priv, int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) { struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_cqe *cqe; struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; struct 
skb_frag_struct *skb_frags; @@ -698,14 +714,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* Drop packet on bad receive or bad checksum */ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_ERROR)) { - mlx4_err(mdev, "CQE completed in error - vendor " + en_err(priv, "CQE completed in error - vendor " "syndrom:%d syndrom:%d\n", ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome, ((struct mlx4_err_cqe *) cqe)->syndrome); goto next; } if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { - mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n"); + en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n"); goto next; } @@ -855,7 +871,7 @@ static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 u16 res = MLX4_EN_ALLOC_SIZE % stride; u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align; - mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d " + en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d " "res:%d offset:%d\n", stride, align, res, offset); return offset; } @@ -900,10 +916,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) priv->rx_skb_size = eff_mtu; priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct)); - mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " + en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " "num_frags:%d):\n", eff_mtu, priv->num_frags); for (i = 0; i < priv->num_frags; i++) { - mlx4_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d " + en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d " "stride:%d last_offset:%d\n", i, priv->frag_info[i].frag_size, priv->frag_info[i].frag_prefix_size, @@ -923,12 +939,12 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv, int i; rss_map->size = roundup_pow_of_two(num_entries); - mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n", - rss_map->size); + en_dbg(DRV, priv, "Setting default RSS map of %d entires\n", + rss_map->size); for (i = 0; i < rss_map->size; i++) { rss_map->map[i] = i % num_rings; - mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]); + en_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]); } } @@ -943,13 +959,13 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, context = kmalloc(sizeof *context , GFP_KERNEL); if (!context) { - mlx4_err(mdev, "Failed to allocate qp context\n"); + en_err(priv, "Failed to allocate qp context\n"); return -ENOMEM; } err = mlx4_qp_alloc(mdev->dev, qpn, qp); if (err) { - mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn); + en_err(priv, "Failed to allocate qp #%x\n", qpn); goto out; } qp->event = mlx4_en_sqp_event; @@ -981,12 +997,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) int err = 0; int good_qps = 0; - mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port); + en_dbg(DRV, priv, "Configuring rss steering\n"); err = mlx4_qp_reserve_range(mdev->dev, rss_map->size, rss_map->size, &rss_map->base_qpn); if (err) { - mlx4_err(mdev, "Failed reserving %d qps for port %u\n", - rss_map->size, priv->port); + en_err(priv, "Failed reserving %d qps\n", rss_map->size); return err; } @@ -1006,13 +1021,13 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) /* Configure RSS indirection qp */ err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn); if (err) { - mlx4_err(mdev, "Failed to reserve range for RSS " - "indirection qp\n"); + en_err(priv, "Failed to reserve range for RSS " + "indirection qp\n"); goto rss_err; } err = 
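The default RSS map built by mlx4_en_set_default_rss_map() above is a power-of-two table whose entries are dealt round-robin across the RX rings. A runnable userspace sketch of the fill loop, with a hand-rolled roundup_pow_of_two in place of the kernel helper:

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int num_entries = 10, num_rings = 4;
	unsigned int size = roundup_pow_of_two(num_entries);

	for (unsigned int i = 0; i < size; i++)
		printf("entry %2u ---> ring %u\n", i, i % num_rings);
	return 0;
}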
@@ -981,12 +997,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	int err = 0;
 	int good_qps = 0;
 
-	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
+	en_dbg(DRV, priv, "Configuring rss steering\n");
 	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
 				    rss_map->size, &rss_map->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
-			 rss_map->size, priv->port);
+		en_err(priv, "Failed reserving %d qps\n", rss_map->size);
 		return err;
 	}
 
@@ -1006,13 +1021,13 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	/* Configure RSS indirection qp */
 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed to reserve range for RSS "
-			       "indirection qp\n");
+		en_err(priv, "Failed to reserve range for RSS "
+			     "indirection qp\n");
 		goto rss_err;
 	}
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
+		en_err(priv, "Failed to allocate RSS indirection QP\n");
 		goto reserve_err;
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index ac6fc499b28..5dc7466ad03 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -68,15 +68,15 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = vmalloc(tmp);
 	if (!ring->tx_info) {
-		mlx4_err(mdev, "Failed allocating tx_info ring\n");
+		en_err(priv, "Failed allocating tx_info ring\n");
 		return -ENOMEM;
 	}
-	mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 		 ring->tx_info, tmp);
 
 	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
 	if (!ring->bounce_buf) {
-		mlx4_err(mdev, "Failed allocating bounce buffer\n");
+		en_err(priv, "Failed allocating bounce buffer\n");
 		err = -ENOMEM;
 		goto err_tx;
 	}
@@ -85,31 +85,31 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
 				 2 * PAGE_SIZE);
 	if (err) {
-		mlx4_err(mdev, "Failed allocating hwq resources\n");
+		en_err(priv, "Failed allocating hwq resources\n");
 		goto err_bounce;
 	}
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
-		mlx4_err(mdev, "Failed to map TX buffer\n");
+		en_err(priv, "Failed to map TX buffer\n");
 		goto err_hwq_res;
 	}
 
 	ring->buf = ring->wqres.buf.direct.buf;
 
-	mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
-		 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
-		 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
+	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
 
 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
+		en_err(priv, "Failed reserving qp for tx ring.\n");
 		goto err_map;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
 	if (err) {
-		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
+		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
 		goto err_reserve;
 	}
 	ring->qp.event = mlx4_en_sqp_event;
@@ -135,7 +135,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
@@ -274,12 +274,12 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	/* Skip last polled descriptor */
 	ring->cons += ring->last_nr_txbb;
 
-	mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
 		 ring->cons, ring->prod);
 
 	if ((u32) (ring->prod - ring->cons) > ring->size) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
+			en_warn(priv, "Tx consumer passed producer!\n");
 		return 0;
 	}
@@ -292,39 +292,11 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	}
 
 	if (cnt)
-		mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
 
 	return cnt;
 }
 
-void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
-{
-	int block = 8 / ring_num;
-	int extra = 8 - (block * ring_num);
-	int num = 0;
-	u16 ring = 1;
-	int prio;
-
-	if (ring_num == 1) {
-		for (prio = 0; prio < 8; prio++)
-			prio_map[prio] = 0;
-		return;
-	}
-
-	for (prio = 0; prio < 8; prio++) {
-		if (extra && (num == block + 1)) {
-			ring++;
-			num = 0;
-			extra--;
-		} else if (!extra && (num == block)) {
-			ring++;
-			num = 0;
-		}
-		prio_map[prio] = ring;
-		mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
-		num++;
-	}
-}
 
 static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 {
@@ -386,18 +358,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 	if (unlikely(ring->blocked)) {
 		if ((u32) (ring->prod - ring->cons) <=
 		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
-
-			/* TODO: support multiqueue netdevs. Currently, we block
-			 * when *any* ring is full. Note that:
-			 * - 2 Tx rings can unblock at the same time and call
-			 *   netif_wake_queue(), which is OK since this
-			 *   operation is idempotent.
-			 * - We might wake the queue just after another ring
-			 *   stopped it. This is no big deal because the next
-			 *   transmission on that ring would stop the queue.
-			 */
 			ring->blocked = 0;
-			netif_wake_queue(dev);
+			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
 			priv->port_stats.wake_queue++;
 		}
 	}
@@ -426,7 +388,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
 
 	INC_PERF_COUNTER(priv->pstats.tx_poll);
 
-	if (!spin_trylock(&ring->comp_lock)) {
+	if (!spin_trylock_irq(&ring->comp_lock)) {
 		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
 		return;
 	}
@@ -439,7 +401,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
 	if (inflight && priv->port_up)
 		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
 
-	spin_unlock(&ring->comp_lock);
+	spin_unlock_irq(&ring->comp_lock);
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
@@ -482,9 +444,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock(&ring->comp_lock)) {
+		if (spin_trylock_irq(&ring->comp_lock)) {
 			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock(&ring->comp_lock);
+			spin_unlock_irq(&ring->comp_lock);
 		}
 }
 
@@ -539,7 +501,6 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 			 int *lso_header_size)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	int real_size;
 
 	if (skb_is_gso(skb)) {
@@ -553,14 +514,14 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 			real_size += DS_SIZE;
 		else {
 			if (netif_msg_tx_err(priv))
-				mlx4_warn(mdev, "Non-linear headers\n");
+				en_warn(priv, "Non-linear headers\n");
 			dev_kfree_skb_any(skb);
 			return 0;
 		}
 	}
 	if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "LSO header size too big\n");
+			en_warn(priv, "LSO header size too big\n");
 		dev_kfree_skb_any(skb);
 		return 0;
 	}
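With per-queue wake in place above, each ring's stop/wake test is independent. The prod and cons counters are free-running u32 values, so their difference is the ring occupancy even across wraparound, and the ring is woken once occupancy drops back below size minus headroom. A runnable userspace sketch of the hysteresis; RING_SIZE, HEADROOM and MAX_DESC_TXBBS values here are illustrative, not the driver's:

#include <stdio.h>

#define RING_SIZE      512
#define HEADROOM       4
#define MAX_DESC_TXBBS 16

static int should_wake(unsigned int prod, unsigned int cons)
{
	/* unsigned subtraction handles counter wraparound correctly */
	return (unsigned int)(prod - cons) <=
	       RING_SIZE - HEADROOM - MAX_DESC_TXBBS;
}

int main(void)
{
	unsigned int cons = 0xfffffff0u;		/* about to wrap */
	unsigned int prod = cons + RING_SIZE - 2;	/* nearly full   */

	printf("nearly full  : wake=%d\n", should_wake(prod, cons));
	printf("after reaping: wake=%d\n", should_wake(prod, cons + 100));
	return 0;
}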
@@ -617,21 +578,20 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
 }
 
-static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
-			 u16 *vlan_tag)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	int tx_ind;
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	u16 vlan_tag = 0;
 
-	/* Obtain VLAN information if present */
-	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
-		*vlan_tag = vlan_tx_tag_get(skb);
-		/* Set the Tx ring to use according to vlan priority */
-		tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
-	} else {
-		*vlan_tag = 0;
-		tx_ind = 0;
+	/* If we support per priority flow control and the packet contains
+	 * a vlan tag, send the packet to the TX ring assigned to that priority
+	 */
+	if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = vlan_tx_tag_get(skb);
+		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
 	}
-	return tx_ind;
+
+	return skb_tx_hash(dev, skb);
 }
 
 int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -651,7 +611,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_addr_t dma;
 	u32 index;
 	__be32 op_own;
-	u16 vlan_tag;
+	u16 vlan_tag = 0;
 	int i;
 	int lso_header_size;
 	void *fragptr;
@@ -669,20 +629,21 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	nr_txbb = desc_size / TXBB_SIZE;
 	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "Oversized header or SG list\n");
+			en_warn(priv, "Oversized header or SG list\n");
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
-	tx_ind = get_vlan_info(priv, skb, &vlan_tag);
+	tx_ind = skb->queue_mapping;
 	ring = &priv->tx_ring[tx_ind];
+	if (priv->vlgrp && vlan_tx_tag_present(skb))
+		vlan_tag = vlan_tx_tag_get(skb);
 
 	/* Check available TXBBs And 2K spare for prefetch */
 	if (unlikely(((int)(ring->prod - ring->cons)) >
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
-		/* every full Tx ring stops queue.
-		 * TODO: implement multi-queue support (per-queue stop) */
-		netif_stop_queue(dev);
+		/* every full Tx ring stops queue */
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
 		ring->blocked = 1;
 		priv->port_stats.queue_stopped++;
@@ -695,7 +656,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Now that we know what Tx ring to use */
 	if (unlikely(!priv->port_up)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "xmit: port down!\n");
+			en_warn(priv, "xmit: port down!\n");
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
@@ -819,7 +780,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Ring doorbell! */
 	wmb();
 	writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
-	dev->trans_start = jiffies;
 
 	/* Poll CQ here */
 	mlx4_en_xmit_poll(priv, tx_ind);
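The new mlx4_en_select_queue() above replaces the removed prio_map: with per-priority pause enabled, a tagged packet goes to the PPP ring for its 802.1p priority (the top three bits of the VLAN tag), and everything else is spread over the first eight rings by a flow hash. A runnable userspace sketch of the decision, with a plain modulo standing in for skb_tx_hash():

#include <stdio.h>

#define MLX4_EN_NUM_TX_RINGS  8
#define MLX4_EN_NUM_PPP_RINGS 8

static unsigned int select_queue(int ppp_on, int vlan_present,
				 unsigned short vlan_tag, unsigned int hash)
{
	if (ppp_on && vlan_present)
		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
	return hash % MLX4_EN_NUM_TX_RINGS;	/* stand-in for skb_tx_hash() */
}

int main(void)
{
	printf("prio 5 tagged -> ring %u\n", select_queue(1, 1, 5 << 13, 0));
	printf("untagged      -> ring %u\n", select_queue(1, 0, 0, 0xdeadbeef));
	return 0;
}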
@@ -623,8 +625,10 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
 				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
 				     &priv->eq_table.eq[i]);
-		if (err)
+		if (err) {
+			--i;
 			goto err_out_unmap;
+		}
 	}
 
 	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 30bea968969..018348c0119 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
 		  "(0/1, default 0)");
 
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
 			   enum mlx4_port_type *port_type)
 {
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
 	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
 	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
+	dev->caps.mtts_per_seg	     = 1 << log_mtts_per_seg;
 	dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
-						    MLX4_MTT_ENTRY_PER_SEG);
+						    dev->caps.mtts_per_seg);
 	dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
 	dev->caps.reserved_uars      = dev_cap->reserved_uars;
 	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
-	dev->caps.mtt_entry_sz	     = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+	dev->caps.mtt_entry_sz	     = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
 	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
 	dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags		     = dev_cap->flags;
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
 		return -1;
 	}
 
+	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+		printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+		return -1;
+	}
+
 	return 0;
 }
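The new log_mtts_per_seg module parameter above is validated to the range 1..5 at load time and then expanded to a power of two that replaces the fixed MLX4_MTT_ENTRY_PER_SEG wherever segment sizes are computed. A runnable userspace sketch of the validation and expansion:

#include <stdio.h>

static int verify_log_mtts_per_seg(int log_mtts_per_seg)
{
	if (log_mtts_per_seg < 1 || log_mtts_per_seg > 5) {
		fprintf(stderr, "bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}
	return 0;
}

int main(void)
{
	for (int log = 0; log <= 6; log++)
		if (!verify_log_mtts_per_seg(log))
			printf("log=%d -> %d MTT entries per segment\n",
			       log, 1 << log);
	return 0;
}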
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index ef840abbcd3..d43a9e4c2ae 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -49,26 +49,42 @@
 #include "en_port.h"
 
 #define DRV_NAME	"mlx4_en"
-#define DRV_VERSION	"1.4.0"
-#define DRV_RELDATE	"Sep 2008"
+#define DRV_VERSION	"1.4.1.1"
+#define DRV_RELDATE	"June 2009"
 
 #define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
 
-#define mlx4_dbg(mlevel, priv, format, arg...) \
-	if (NETIF_MSG_##mlevel & priv->msg_enable) \
-	printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
-		(dev_name(&priv->mdev->pdev->dev)) , ## arg)
+#define en_print(level, priv, format, arg...)			\
+	{							\
+	if ((priv)->registered)					\
+		printk(level "%s: %s: " format, DRV_NAME,	\
+			(priv->dev)->name, ## arg);		\
+	else							\
+		printk(level "%s: %s: Port %d: " format,	\
+			DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
+			(priv)->port, ## arg);			\
+	}
+
+#define en_dbg(mlevel, priv, format, arg...)			\
+	{							\
+	if (NETIF_MSG_##mlevel & priv->msg_enable)		\
+		en_print(KERN_DEBUG, priv, format, ## arg)	\
+	}
+#define en_warn(priv, format, arg...)			\
+	en_print(KERN_WARNING, priv, format, ## arg)
+#define en_err(priv, format, arg...)			\
+	en_print(KERN_ERR, priv, format, ## arg)
 
 #define mlx4_err(mdev, format, arg...) \
 	printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
-		(dev_name(&mdev->pdev->dev)) , ## arg)
+		dev_name(&mdev->pdev->dev) , ## arg)
 #define mlx4_info(mdev, format, arg...) \
 	printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
-		(dev_name(&mdev->pdev->dev)) , ## arg)
+		dev_name(&mdev->pdev->dev) , ## arg)
 #define mlx4_warn(mdev, format, arg...) \
 	printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
-		(dev_name(&mdev->pdev->dev)) , ## arg)
+		dev_name(&mdev->pdev->dev) , ## arg)
 
 /*
  * Device constants
@@ -123,12 +139,14 @@ enum {
 #define MLX4_EN_MIN_RX_SIZE	(MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
 #define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
 
-#define MLX4_EN_TX_RING_NUM		9
-#define MLX4_EN_DEF_TX_RING_SIZE	1024
+#define MLX4_EN_SMALL_PKT_SIZE		64
+#define MLX4_EN_NUM_TX_RINGS		8
+#define MLX4_EN_NUM_PPP_RINGS		8
+#define MLX4_EN_DEF_TX_RING_SIZE	512
 #define MLX4_EN_DEF_RX_RING_SIZE  	1024
 
-/* Target number of bytes to coalesce with interrupt moderation */
-#define MLX4_EN_RX_COAL_TARGET	0x20000
+/* Target number of packets to coalesce with interrupt moderation */
+#define MLX4_EN_RX_COAL_TARGET	44
 #define MLX4_EN_RX_COAL_TIME	0x10
 
 #define MLX4_EN_TX_COAL_PKTS	5
@@ -462,7 +480,6 @@ struct mlx4_en_priv {
 	int base_qpn;
 
 	struct mlx4_en_rss_map rss_map;
-	u16 tx_prio_map[8];
 	u32 flags;
#define MLX4_EN_FLAG_PROMISC	0x1
 	u32 tx_ring_num;
@@ -500,8 +517,6 @@ void mlx4_en_stop_port(struct net_device *dev);
 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
 
-int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
-
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 		      int entries, int ring, enum cq_type mode);
 void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -512,6 +527,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 void mlx4_en_poll_tx_cq(unsigned long data);
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
@@ -546,7 +562,6 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
 void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
 				 struct mlx4_en_rss_map *rss_map,
 				 int num_entries, int num_rings);
-void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
 int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
 void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
 int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
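The en_print() family introduced above picks its message prefix by device state: once the netdev is registered, the interface name; before that, the PCI device name plus port number. A runnable userspace sketch of the same dispatch; the struct and field names are stand-ins for the driver's priv fields:

#include <stdio.h>

struct fake_priv {
	int registered;
	int port;
	const char *ifname;	/* valid once registered */
	const char *pciname;	/* always valid */
};

#define en_print(priv, fmt, ...)					\
	do {								\
		if ((priv)->registered)					\
			printf("mlx4_en: %s: " fmt, (priv)->ifname,	\
			       ##__VA_ARGS__);				\
		else							\
			printf("mlx4_en: %s: Port %d: " fmt,		\
			       (priv)->pciname, (priv)->port,		\
			       ##__VA_ARGS__);				\
	} while (0)

int main(void)
{
	struct fake_priv p = { 0, 1, "eth2", "0000:07:00.0" };

	en_print(&p, "Using %d TX rings\n", 16);	/* PCI-name prefix */
	p.registered = 1;
	en_print(&p, "Link Up\n");			/* ifname prefix   */
	return 0;
}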
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0caf74cae8b..5887e4764d2 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
 	} else
 		mtt->page_shift = page_shift;
 
-	for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
+	for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
 		++mtt->order;
 
 	mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
 						   MLX4_MPT_PD_FLAG_RAE);
 		mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
-						   MLX4_MTT_ENTRY_PER_SEG);
+						   dev->caps.mtts_per_seg);
 	} else {
 		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
 	}
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	    (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
 		return -EINVAL;
 
-	if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
+	if (start_index & (dev->caps.mtts_per_seg - 1))
 		return -EINVAL;
 
 	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
@@ -402,7 +402,8 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	for (i = 0; i < npages; ++i)
 		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-	dma_sync_single(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+				npages * sizeof (u64), DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -549,8 +550,8 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
 	for (i = 0; i < npages; ++i)
 		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-	dma_sync_single(&dev->pdev->dev, fmr->dma_handle,
-			npages * sizeof(u64), DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+				npages * sizeof(u64), DMA_TO_DEVICE);
 
 	fmr->mpt->key    = cpu_to_be32(key);
 	fmr->mpt->lkey   = cpu_to_be32(key);
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index cebdf3243ca..bd22df95adf 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
 	profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
 	profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
-	profile[MLX4_RES_MTT].size    = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+	profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
 	profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
 
 	profile[MLX4_RES_QP].num      = request->num_qp;
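The mr.c hunk above makes mlx4_mtt_init() compute the allocation order against the configurable segment size: order counts the doublings needed for one segment to cover npages, so the allocation is 2^order segments. A runnable userspace sketch of that loop for a fixed npages across the allowed segment sizes:

#include <stdio.h>

static int mtt_order(int npages, int mtts_per_seg)
{
	int order = 0;

	/* double the covered MTT count until it reaches npages */
	for (int i = mtts_per_seg; i < npages; i <<= 1)
		++order;
	return order;
}

int main(void)
{
	for (int log = 1; log <= 5; log++) {
		int seg = 1 << log;

		printf("npages=1000 mtts_per_seg=%2d -> order=%d (%d MTTs)\n",
		       seg, mtt_order(1000, seg),
		       seg << mtt_order(1000, seg));
	}
	return 0;
}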