author	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2012-03-09 10:55:17 -0800
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2012-03-09 10:55:17 -0800
commit	b675b3667f6729dcd1036a2a129b35445947f905 (patch)
tree	0d58791e9063d3ca2c352da6f3e7df2bdb876f9d /drivers/net/ethernet/mellanox/mlx4
parent	104a5f3cad8f2f27cadbdf0029400ecd9e17ccc0 (diff)
parent	192cfd58774b4d17b2fe8bdc77d89c2ef4e0591d (diff)
Merge commit 'v3.3-rc6' into next
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/Makefile	|    2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/catas.c	|    7
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/cmd.c	| 1337
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/cq.c	|  141
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_cq.c	|    8
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_ethtool.c	|  190
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_main.c	|    6
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_netdev.c	|  101
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_port.c	|   84
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_port.h	|   43
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_resources.c	|    2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_rx.c	|   49
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_selftest.c	|    2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_tx.c	|   96
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/eq.c	|  440
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/fw.c	|  408
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/fw.h	|   27
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/icm.c	|    5
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/intf.c	|    6
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/main.c	|  913
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mcg.c	|  232
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4.h	|  674
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4_en.h	|   23
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mr.c	|  486
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/pd.c	|   16
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/port.c	|  639
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/profile.c	|   11
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/qp.c	|  238
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/resource_tracker.c	| 3074
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/sense.c	|    3
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/srq.c	|  132
31 files changed, 8371 insertions, 1024 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index d1aa45a1585..4a40ab967ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
- mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+ mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 45aea9c3ae2..915e947b422 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -48,7 +48,8 @@ static struct work_struct catas_work;
static int internal_err_reset = 1;
module_param(internal_err_reset, int, 0644);
MODULE_PARM_DESC(internal_err_reset,
- "Reset device on internal errors if non-zero (default 1)");
+ "Reset device on internal errors if non-zero"
+ " (default 1, in SRIOV mode default is 0)");
static void dump_err_buf(struct mlx4_dev *dev)
{
@@ -116,6 +117,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
phys_addr_t addr;
+	/* If we are in SRIOV, the default of the module param must be 0 */
+ if (mlx4_is_mfunc(dev))
+ internal_err_reset = 0;
+
INIT_LIST_HEAD(&priv->catas_err.list);
init_timer(&priv->catas_err.timer);
priv->catas_err.map = NULL;
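
The catas.c hunk above disables automatic device reset by default when the HCA runs in multi-function (SRIOV) mode, where a reset issued from a single function would affect every function sharing the device. A minimal standalone sketch of that pattern (all names below are hypothetical stand-ins, not the driver's real API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct mlx4_dev and mlx4_is_mfunc(). */
struct sketch_dev { bool mfunc; };
static bool sketch_is_mfunc(struct sketch_dev *dev) { return dev->mfunc; }

static int internal_err_reset = 1;	/* module parameter, default 1 */

static void sketch_start_catas_poll(struct sketch_dev *dev)
{
	/* In SRIOV mode the effective default must be 0. */
	if (sketch_is_mfunc(dev))
		internal_err_reset = 0;
}

int main(void)
{
	struct sketch_dev dev = { .mfunc = true };

	sketch_start_catas_poll(&dev);
	printf("internal_err_reset = %d\n", internal_err_reset);	/* 0 */
	return 0;
}
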
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78f5a1a0b8c..eaf09d4f02d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -39,12 +39,18 @@
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
+#include <linux/semaphore.h>
#include <asm/io.h>
#include "mlx4.h"
+#include "fw.h"
#define CMD_POLL_TOKEN 0xffff
+#define INBOX_MASK 0xffffffffffffff00ULL
+
+#define CMD_CHAN_VER 1
+#define CMD_CHAN_IF_REV 1
enum {
/* command completed successfully: */
@@ -110,8 +116,12 @@ struct mlx4_cmd_context {
int next;
u64 out_param;
u16 token;
+ u8 fw_status;
};
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr_cmd *in_vhcr);
+
static int mlx4_status_to_errno(u8 status)
{
static const int trans_table[] = {
@@ -142,6 +152,139 @@ static int mlx4_status_to_errno(u8 status)
return trans_table[status];
}
+static u8 mlx4_errno_to_status(int errno)
+{
+ switch (errno) {
+ case -EPERM:
+ return CMD_STAT_BAD_OP;
+ case -EINVAL:
+ return CMD_STAT_BAD_PARAM;
+ case -ENXIO:
+ return CMD_STAT_BAD_SYS_STATE;
+ case -EBUSY:
+ return CMD_STAT_RESOURCE_BUSY;
+ case -ENOMEM:
+ return CMD_STAT_EXCEED_LIM;
+ case -ENFILE:
+ return CMD_STAT_ICM_ERROR;
+ default:
+ return CMD_STAT_INTERNAL_ERR;
+ }
+}
+
+static int comm_pending(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 status = readl(&priv->mfunc.comm->slave_read);
+
+ return (swab32(status) >> 31) != priv->cmd.comm_toggle;
+}
+
+static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 val;
+
+ priv->cmd.comm_toggle ^= 1;
+ val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
+ __raw_writel((__force u32) cpu_to_be32(val),
+ &priv->mfunc.comm->slave_write);
+ mmiowb();
+}
+
+static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ unsigned long end;
+ int err = 0;
+ int ret_from_pending = 0;
+
+ /* First, verify that the master reports correct status */
+ if (comm_pending(dev)) {
+		mlx4_warn(dev, "Communication channel is not idle. "
+ "my toggle is %d (cmd:0x%x)\n",
+ priv->cmd.comm_toggle, cmd);
+ return -EAGAIN;
+ }
+
+ /* Write command */
+ down(&priv->cmd.poll_sem);
+ mlx4_comm_cmd_post(dev, cmd, param);
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (comm_pending(dev) && time_before(jiffies, end))
+ cond_resched();
+ ret_from_pending = comm_pending(dev);
+ if (ret_from_pending) {
+ /* check if the slave is trying to boot in the middle of
+ * FLR process. The only non-zero result in the RESET command
+		 * is MLX4_DELAY_RESET_SLAVE */
+ if ((MLX4_COMM_CMD_RESET == cmd)) {
+ mlx4_warn(dev, "Got slave FLRed from Communication"
+ " channel (ret:0x%x)\n", ret_from_pending);
+ err = MLX4_DELAY_RESET_SLAVE;
+ } else {
+ mlx4_warn(dev, "Communication channel timed out\n");
+ err = -ETIMEDOUT;
+ }
+ }
+
+ up(&priv->cmd.poll_sem);
+ return err;
+}
+
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
+ u16 param, unsigned long timeout)
+{
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ struct mlx4_cmd_context *context;
+ int err = 0;
+
+ down(&cmd->event_sem);
+
+ spin_lock(&cmd->context_lock);
+ BUG_ON(cmd->free_head < 0);
+ context = &cmd->context[cmd->free_head];
+ context->token += cmd->token_mask + 1;
+ cmd->free_head = context->next;
+ spin_unlock(&cmd->context_lock);
+
+ init_completion(&context->done);
+
+ mlx4_comm_cmd_post(dev, op, param);
+
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = context->result;
+ if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, context->fw_status);
+ goto out;
+ }
+
+out:
+ spin_lock(&cmd->context_lock);
+ context->next = cmd->free_head;
+ cmd->free_head = context - cmd->context;
+ spin_unlock(&cmd->context_lock);
+
+ up(&cmd->event_sem);
+ return err;
+}
+
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout)
+{
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+ return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
+}
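
/*
 * Annotation (not part of the patch): a standalone model of the toggle
 * handshake used by comm_pending() and mlx4_comm_cmd_post() above.
 * Bit 31 of the comm word is the toggle, bits 16..23 the command and
 * bits 0..15 the parameter; a posted command stays pending until the
 * master echoes the slave's toggle back through slave_read. All names
 * here are hypothetical sketch stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_comm_word(uint8_t cmd, uint16_t param, unsigned toggle)
{
	return (uint32_t)param | ((uint32_t)cmd << 16) | ((toggle & 1u) << 31);
}

static int sketch_comm_pending(uint32_t slave_read, unsigned my_toggle)
{
	return (slave_read >> 31) != (my_toggle & 1u);
}

int main(void)
{
	unsigned my_toggle = 0;
	uint32_t slave_read = 0;		/* master idle, toggle 0 */

	my_toggle ^= 1;				/* flip, then post command */
	printf("%d\n", sketch_comm_pending(slave_read, my_toggle));	/* 1 */

	slave_read = sketch_comm_word(0, 0, my_toggle);	/* master echoes */
	printf("%d\n", sketch_comm_pending(slave_read, my_toggle));	/* 0 */
	return 0;
}
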
+
static int cmd_pending(struct mlx4_dev *dev)
{
u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -167,8 +310,10 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
while (cmd_pending(dev)) {
- if (time_after_eq(jiffies, end))
+ if (time_after_eq(jiffies, end)) {
+			mlx4_err(dev, "%s: cmd_pending failed\n", __func__);
goto out;
+ }
cond_resched();
}
@@ -192,7 +337,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
(cmd->toggle << HCR_T_BIT) |
(event ? (1 << HCR_E_BIT) : 0) |
(op_modifier << HCR_OPMOD_SHIFT) |
- op), hcr + 6);
+ op), hcr + 6);
/*
* Make sure that our HCR writes don't get mixed in with
@@ -209,6 +354,62 @@ out:
return ret;
}
+static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
+ int ret;
+
+ down(&priv->cmd.slave_sem);
+ vhcr->in_param = cpu_to_be64(in_param);
+ vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
+ vhcr->in_modifier = cpu_to_be32(in_modifier);
+ vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
+ vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
+ vhcr->status = 0;
+ vhcr->flags = !!(priv->cmd.use_events) << 6;
+ if (mlx4_is_master(dev)) {
+ ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
+ if (!ret) {
+ if (out_is_imm) {
+ if (out_param)
+ *out_param =
+ be64_to_cpu(vhcr->out_param);
+ else {
+					mlx4_err(dev, "response expected while "
+ "output mailbox is NULL for "
+ "command 0x%x\n", op);
+ vhcr->status = CMD_STAT_BAD_PARAM;
+ }
+ }
+ ret = mlx4_status_to_errno(vhcr->status);
+ }
+ } else {
+ ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+ MLX4_COMM_TIME + timeout);
+ if (!ret) {
+ if (out_is_imm) {
+ if (out_param)
+ *out_param =
+ be64_to_cpu(vhcr->out_param);
+ else {
+					mlx4_err(dev, "response expected while "
+ "output mailbox is NULL for "
+ "command 0x%x\n", op);
+ vhcr->status = CMD_STAT_BAD_PARAM;
+ }
+ }
+ ret = mlx4_status_to_errno(vhcr->status);
+ } else
+			mlx4_err(dev, "failed execution of VHCR_POST command "
+ "opcode 0x%x\n", op);
+ }
+ up(&priv->cmd.slave_sem);
+ return ret;
+}
+
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
@@ -217,6 +418,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
void __iomem *hcr = priv->cmd.hcr;
int err = 0;
unsigned long end;
+ u32 stat;
down(&priv->cmd.poll_sem);
@@ -240,9 +442,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
(u64) be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
-
- err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
- __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+ stat = be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
+ err = mlx4_status_to_errno(stat);
+ if (err)
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, stat);
out:
up(&priv->cmd.poll_sem);
@@ -259,6 +464,7 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
if (token != context->token)
return;
+ context->fw_status = status;
context->result = mlx4_status_to_errno(status);
context->out_param = out_param;
@@ -287,14 +493,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, context->token, 1);
- if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
err = -EBUSY;
goto out;
}
err = context->result;
- if (err)
+ if (err) {
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, context->fw_status);
goto out;
+ }
if (out_is_imm)
*out_param = context->out_param;
@@ -311,17 +521,1053 @@ out:
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
- u16 op, unsigned long timeout)
+ u16 op, unsigned long timeout, int native)
{
- if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
- else
- return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
+ if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_cmd_wait(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+ else
+ return mlx4_cmd_poll(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+ }
+ return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
+
+static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+}
+
+static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
+ int slave, u64 slave_addr,
+ int size, int is_read)
+{
+ u64 in_param;
+ u64 out_param;
+
+ if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
+ (slave & ~0x7f) | (size & 0xff)) {
+ mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
+ "master_addr:0x%llx slave_id:%d size:%d\n",
+ slave_addr, master_addr, slave, size);
+ return -EINVAL;
+ }
+
+ if (is_read) {
+ in_param = (u64) slave | slave_addr;
+ out_param = (u64) dev->caps.function | master_addr;
+ } else {
+ in_param = (u64) dev->caps.function | master_addr;
+ out_param = (u64) slave | slave_addr;
+ }
+
+ return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
+ MLX4_CMD_ACCESS_MEM,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+}
+
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
+ out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
+ if (cmd->encode_slave_id) {
+ in_param &= 0xffffffffffffff00ll;
+ in_param |= slave;
+ }
+
+ err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
+ vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+ if (cmd->out_is_imm)
+ vhcr->out_param = out_param;
+
+ return err;
+}
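
/*
 * Annotation (not part of the patch): the encode_slave_id step in
 * mlx4_DMA_wrapper() above stores the slave function number in the low
 * byte of the page-aligned mailbox DMA address; INBOX_MASK later
 * strips it to recover the address. Standalone sketch with
 * hypothetical names.
 */
#include <stdint.h>

#define SKETCH_INBOX_MASK 0xffffffffffffff00ULL

static uint64_t sketch_encode_slave_id(uint64_t dma, uint8_t slave)
{
	return (dma & SKETCH_INBOX_MASK) | slave; /* low byte = slave id */
}

static uint64_t sketch_inbox_addr(uint64_t in_param)
{
	return in_param & SKETCH_INBOX_MASK;	  /* strip the slave id */
}
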
+
+static struct mlx4_cmd_info cmd_info[] = {
+ {
+ .opcode = MLX4_CMD_QUERY_FW,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_HCA,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_DEV_CAP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_FUNC_CAP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_ADAPTER,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_INIT_PORT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_INIT_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_CLOSE_PORT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CLOSE_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_PORT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SET_PORT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_MAP_EQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MAP_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_EQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW_HEALTH_CHECK,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_NOP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_ALLOC_RES,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ALLOC_RES_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_FREE_RES,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_FREE_RES_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_MPT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_MPT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_MPT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_READ_MTT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_WRITE_MTT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_WRITE_MTT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SYNC_TPT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_EQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_EQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_CQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_CQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_CQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_MODIFY_CQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MODIFY_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_SRQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_SRQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_SRQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_ARM_SRQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ARM_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RST2INIT_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_RST2INIT_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INIT2INIT_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INIT2RTR_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_INIT2RTR_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTR2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTS2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQERR2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_2ERR_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTS2SQD_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQD2SQD_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQD2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_2RST_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_2RST_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_QP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SUSPEND_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_UNSUSPEND_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_IF_STAT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_IF_STAT_wrapper
+ },
+ /* Native multicast commands are not available for guests */
+ {
+ .opcode = MLX4_CMD_QP_ATTACH,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_ATTACH_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_PROMISC,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_PROMISC_wrapper
+ },
+ /* Ethernet specific commands */
+ {
+ .opcode = MLX4_CMD_SET_VLAN_FLTR,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_VLAN_FLTR_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SET_MCAST_FLTR,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_MCAST_FLTR_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_DUMP_ETH_STATS,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_DUMP_ETH_STATS_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INFORM_FLR_DONE,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+};
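
/*
 * Annotation (not part of the patch): a sketch of how a dispatch table
 * like cmd_info[] above is consumed - a linear scan keyed on the
 * 12-bit opcode, with unknown opcodes rejected (the real lookup lives
 * in mlx4_master_process_vhcr() below). Names are stand-ins.
 */
#include <stddef.h>

struct sketch_cmd_info {
	int opcode;
	int has_inbox, has_outbox, out_is_imm;
};

static const struct sketch_cmd_info *
sketch_lookup_cmd(const struct sketch_cmd_info *tbl, size_t n, int opcode)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].opcode == opcode)
			return &tbl[i];
	return NULL;	/* caller replies with CMD_STAT_BAD_PARAM */
}
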
+
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr_cmd *in_vhcr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_info *cmd = NULL;
+ struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
+ struct mlx4_vhcr *vhcr;
+ struct mlx4_cmd_mailbox *inbox = NULL;
+ struct mlx4_cmd_mailbox *outbox = NULL;
+ u64 in_param;
+ u64 out_param;
+ int ret = 0;
+ int i;
+ int err = 0;
+
+ /* Create sw representation of Virtual HCR */
+ vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
+ if (!vhcr)
+ return -ENOMEM;
+
+ /* DMA in the vHCR */
+ if (!in_vhcr) {
+ ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+ priv->mfunc.master.slave_state[slave].vhcr_dma,
+ ALIGN(sizeof(struct mlx4_vhcr_cmd),
+ MLX4_ACCESS_MEM_ALIGN), 1);
+ if (ret) {
+			mlx4_err(dev, "%s: Failed reading vhcr "
+				"ret: 0x%x\n", __func__, ret);
+ kfree(vhcr);
+ return ret;
+ }
+ }
+
+ /* Fill SW VHCR fields */
+ vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
+ vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
+ vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
+ vhcr->token = be16_to_cpu(vhcr_cmd->token);
+ vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
+ vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
+ vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
+
+ /* Lookup command */
+ for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
+ if (vhcr->op == cmd_info[i].opcode) {
+ cmd = &cmd_info[i];
+ break;
+ }
+ }
+ if (!cmd) {
+ mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
+ vhcr->op, slave);
+ vhcr_cmd->status = CMD_STAT_BAD_PARAM;
+ goto out_status;
+ }
+
+ /* Read inbox */
+ if (cmd->has_inbox) {
+ vhcr->in_param &= INBOX_MASK;
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox)) {
+ vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+ inbox = NULL;
+ goto out_status;
+ }
+
+ if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+ vhcr->in_param,
+ MLX4_MAILBOX_SIZE, 1)) {
+ mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+ __func__, cmd->opcode);
+ vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
+ goto out_status;
+ }
+ }
+
+ /* Apply permission and bound checks if applicable */
+ if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
+ mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
+ "checks for resource_id:%d\n", vhcr->op, slave,
+ vhcr->in_modifier);
+ vhcr_cmd->status = CMD_STAT_BAD_OP;
+ goto out_status;
+ }
+
+ /* Allocate outbox */
+ if (cmd->has_outbox) {
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+ outbox = NULL;
+ goto out_status;
+ }
+ }
+
+ /* Execute the command! */
+ if (cmd->wrapper) {
+ err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
+ cmd);
+ if (cmd->out_is_imm)
+ vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+ } else {
+ in_param = cmd->has_inbox ? (u64) inbox->dma :
+ vhcr->in_param;
+ out_param = cmd->has_outbox ? (u64) outbox->dma :
+ vhcr->out_param;
+ err = __mlx4_cmd(dev, in_param, &out_param,
+ cmd->out_is_imm, vhcr->in_modifier,
+ vhcr->op_modifier, vhcr->op,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ if (cmd->out_is_imm) {
+ vhcr->out_param = out_param;
+ vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+ }
+ }
+
+ if (err) {
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
+ " error:%d, status %d\n",
+ vhcr->op, slave, vhcr->errno, err);
+ vhcr_cmd->status = mlx4_errno_to_status(err);
+ goto out_status;
+ }
+
+
+ /* Write outbox if command completed successfully */
+ if (cmd->has_outbox && !vhcr_cmd->status) {
+ ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
+ vhcr->out_param,
+ MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
+ if (ret) {
+ /* If we failed to write back the outbox after the
+			 * command was successfully executed, we must fail this
+			 * slave, as it is now in an undefined state */
+			mlx4_err(dev, "%s: Failed writing outbox\n", __func__);
+ goto out;
+ }
+ }
+
+out_status:
+ /* DMA back vhcr result */
+ if (!in_vhcr) {
+ ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+ priv->mfunc.master.slave_state[slave].vhcr_dma,
+ ALIGN(sizeof(struct mlx4_vhcr),
+ MLX4_ACCESS_MEM_ALIGN),
+ MLX4_CMD_WRAPPED);
+ if (ret)
+			mlx4_err(dev, "%s: Failed writing vhcr result\n",
+ __func__);
+ else if (vhcr->e_bit &&
+ mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
+ mlx4_warn(dev, "Failed to generate command completion "
+ "eqe for slave %d\n", slave);
+ }
+
+out:
+ kfree(vhcr);
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
+ u16 param, u8 toggle)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ u32 reply;
+ u32 slave_status = 0;
+ u8 is_going_down = 0;
+ int i;
+
+ slave_state[slave].comm_toggle ^= 1;
+ reply = (u32) slave_state[slave].comm_toggle << 31;
+ if (toggle != slave_state[slave].comm_toggle) {
+		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER "
+			"STATE COMPROMISED ***\n", toggle, slave);
+ goto reset_slave;
+ }
+ if (cmd == MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "Received reset from slave:%d\n", slave);
+ slave_state[slave].active = false;
+ for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
+ slave_state[slave].event_eq[i].eqn = -1;
+ slave_state[slave].event_eq[i].token = 0;
+ }
+		/* check if we are in the middle of FLR process,
+		 * if so return "retry" status to the slave */
+ if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+ slave_status = MLX4_DELAY_RESET_SLAVE;
+ goto inform_slave_state;
+ }
+
+ /* write the version in the event field */
+ reply |= mlx4_comm_get_version();
+
+ goto reset_slave;
+ }
+	/* command from slave in the middle of FLR */
+ if (cmd != MLX4_COMM_CMD_RESET &&
+ MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+		mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) "
+ "in the middle of FLR\n", slave, cmd);
+ return;
+ }
+
+ switch (cmd) {
+ case MLX4_COMM_CMD_VHCR0:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma = ((u64) param) << 48;
+ priv->mfunc.master.slave_state[slave].cookie = 0;
+ mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ break;
+ case MLX4_COMM_CMD_VHCR1:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= ((u64) param) << 32;
+ break;
+ case MLX4_COMM_CMD_VHCR2:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= ((u64) param) << 16;
+ break;
+ case MLX4_COMM_CMD_VHCR_EN:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= param;
+ slave_state[slave].active = true;
+ break;
+ case MLX4_COMM_CMD_VHCR_POST:
+ if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
+ (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+ goto reset_slave;
+ down(&priv->cmd.slave_sem);
+ if (mlx4_master_process_vhcr(dev, slave, NULL)) {
+ mlx4_err(dev, "Failed processing vhcr for slave:%d,"
+				" resetting slave.\n", slave);
+ up(&priv->cmd.slave_sem);
+ goto reset_slave;
+ }
+ up(&priv->cmd.slave_sem);
+ break;
+ default:
+ mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
+ goto reset_slave;
+ }
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (!slave_state[slave].is_slave_going_down)
+ slave_state[slave].last_cmd = cmd;
+ else
+ is_going_down = 1;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ if (is_going_down) {
+		mlx4_warn(dev, "Slave is going down, aborting command (%d)"
+ " executing from slave:%d\n",
+ cmd, slave);
+ return;
+ }
+ __raw_writel((__force u32) cpu_to_be32(reply),
+ &priv->mfunc.comm[slave].slave_read);
+ mmiowb();
+
+ return;
+
+reset_slave:
+ /* cleanup any slave resources */
+ mlx4_delete_all_resources_for_slave(dev, slave);
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (!slave_state[slave].is_slave_going_down)
+ slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+	/* with slave in the middle of FLR, no need to clean resources again. */
+inform_slave_state:
+ memset(&slave_state[slave].event_eq, 0,
+ sizeof(struct mlx4_slave_event_eq_info));
+ __raw_writel((__force u32) cpu_to_be32(reply),
+ &priv->mfunc.comm[slave].slave_read);
+ wmb();
+}
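
/*
 * Annotation (not part of the patch): the VHCR0..VHCR_EN sequence
 * handled by mlx4_master_do_cmd() above assembles the slave's 64-bit
 * VHCR DMA address from four 16-bit comm parameters, high word first.
 * Standalone sketch with a hypothetical name.
 */
#include <stdint.h>

static uint64_t sketch_build_vhcr_dma(uint16_t p0, uint16_t p1,
				      uint16_t p2, uint16_t p3)
{
	uint64_t dma;

	dma  = (uint64_t)p0 << 48;	/* MLX4_COMM_CMD_VHCR0 */
	dma |= (uint64_t)p1 << 32;	/* MLX4_COMM_CMD_VHCR1 */
	dma |= (uint64_t)p2 << 16;	/* MLX4_COMM_CMD_VHCR2 */
	dma |= (uint64_t)p3;		/* MLX4_COMM_CMD_VHCR_EN */
	return dma;
}
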
+
+/* master command processing */
+void mlx4_master_comm_channel(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work,
+ struct mlx4_mfunc_master_ctx,
+ comm_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv =
+ container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ __be32 *bit_vec;
+ u32 comm_cmd;
+ u32 vec;
+ int i, j, slave;
+ int toggle;
+ int served = 0;
+ int reported = 0;
+ u32 slt;
+
+ bit_vec = master->comm_arm_bit_vector;
+ for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
+ vec = be32_to_cpu(bit_vec[i]);
+ for (j = 0; j < 32; j++) {
+ if (!(vec & (1 << j)))
+ continue;
+ ++reported;
+ slave = (i * 32) + j;
+ comm_cmd = swab32(readl(
+ &mfunc->comm[slave].slave_write));
+ slt = swab32(readl(&mfunc->comm[slave].slave_read))
+ >> 31;
+ toggle = comm_cmd >> 31;
+ if (toggle != slt) {
+ if (master->slave_state[slave].comm_toggle
+ != slt) {
+ printk(KERN_INFO "slave %d out of sync."
+ " read toggle %d, state toggle %d. "
+					"Resyncing.\n", slave, slt,
+ master->slave_state[slave].comm_toggle);
+ master->slave_state[slave].comm_toggle =
+ slt;
+ }
+ mlx4_master_do_cmd(dev, slave,
+ comm_cmd >> 16 & 0xff,
+ comm_cmd & 0xffff, toggle);
+ ++served;
+ }
+ }
+ }
+
+ if (reported && reported != served)
+ mlx4_warn(dev, "Got command event with bitmask from %d slaves"
+ " but %d were served\n",
+ reported, served);
+
+ if (mlx4_ARM_COMM_CHANNEL(dev))
+ mlx4_warn(dev, "Failed to arm comm channel events\n");
+}
+
+static int sync_toggles(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int wr_toggle;
+ int rd_toggle;
+ unsigned long end;
+
+ wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
+ end = jiffies + msecs_to_jiffies(5000);
+
+ while (time_before(jiffies, end)) {
+ rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
+ if (rd_toggle == wr_toggle) {
+ priv->cmd.comm_toggle = rd_toggle;
+ return 0;
+ }
+
+ cond_resched();
+ }
+
+ /*
+ * we could reach here if for example the previous VM using this
+ * function misbehaved and left the channel with unsynced state. We
+ * should fix this here and give this VM a chance to use a properly
+ * synced channel
+ */
+	mlx4_warn(dev, "recovering from a previously misbehaved VM\n");
+ __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
+ __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
+ priv->cmd.comm_toggle = 0;
+
+ return 0;
+}
+
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state;
+ int i, j, err, port;
+
+ priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ &priv->mfunc.vhcr_dma,
+ GFP_KERNEL);
+ if (!priv->mfunc.vhcr) {
+ mlx4_err(dev, "Couldn't allocate vhcr.\n");
+ return -ENOMEM;
+ }
+
+ if (mlx4_is_master(dev))
+ priv->mfunc.comm =
+ ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+ priv->fw.comm_base, MLX4_COMM_PAGESIZE);
+ else
+ priv->mfunc.comm =
+ ioremap(pci_resource_start(dev->pdev, 2) +
+ MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
+ if (!priv->mfunc.comm) {
+ mlx4_err(dev, "Couldn't map communication vector.\n");
+ goto err_vhcr;
+ }
+
+ if (mlx4_is_master(dev)) {
+ priv->mfunc.master.slave_state =
+ kzalloc(dev->num_slaves *
+ sizeof(struct mlx4_slave_state), GFP_KERNEL);
+ if (!priv->mfunc.master.slave_state)
+ goto err_comm;
+
+ for (i = 0; i < dev->num_slaves; ++i) {
+ s_state = &priv->mfunc.master.slave_state[i];
+ s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
+ s_state->event_eq[j].eqn = -1;
+ __raw_writel((__force u32) 0,
+ &priv->mfunc.comm[i].slave_write);
+ __raw_writel((__force u32) 0,
+ &priv->mfunc.comm[i].slave_read);
+ mmiowb();
+ for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ s_state->vlan_filter[port] =
+ kzalloc(sizeof(struct mlx4_vlan_fltr),
+ GFP_KERNEL);
+ if (!s_state->vlan_filter[port]) {
+ if (--port)
+ kfree(s_state->vlan_filter[port]);
+ goto err_slaves;
+ }
+ INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+ }
+ spin_lock_init(&s_state->lock);
+ }
+
+ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+ priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+ INIT_WORK(&priv->mfunc.master.comm_work,
+ mlx4_master_comm_channel);
+ INIT_WORK(&priv->mfunc.master.slave_event_work,
+ mlx4_gen_slave_eqe);
+ INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
+ mlx4_master_handle_slave_flr);
+ spin_lock_init(&priv->mfunc.master.slave_state_lock);
+ priv->mfunc.master.comm_wq =
+ create_singlethread_workqueue("mlx4_comm");
+ if (!priv->mfunc.master.comm_wq)
+ goto err_slaves;
+
+ if (mlx4_init_resource_tracker(dev))
+ goto err_thread;
+
+ sema_init(&priv->cmd.slave_sem, 1);
+ err = mlx4_ARM_COMM_CHANNEL(dev);
+ if (err) {
+			mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
+ err);
+ goto err_resource;
+ }
+
+ } else {
+ err = sync_toggles(dev);
+ if (err) {
+ mlx4_err(dev, "Couldn't sync toggles\n");
+ goto err_comm;
+ }
+
+ sema_init(&priv->cmd.slave_sem, 1);
+ }
+ return 0;
+
+err_resource:
+ mlx4_free_resource_tracker(dev);
+err_thread:
+ flush_workqueue(priv->mfunc.master.comm_wq);
+ destroy_workqueue(priv->mfunc.master.comm_wq);
+err_slaves:
+ while (--i) {
+ for (port = 1; port <= MLX4_MAX_PORTS; port++)
+ kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+ }
+ kfree(priv->mfunc.master.slave_state);
+err_comm:
+ iounmap(priv->mfunc.comm);
+err_vhcr:
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr,
+ priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
+ return -ENOMEM;
+}
+
int mlx4_cmd_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -331,22 +1577,51 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->cmd.use_events = 0;
priv->cmd.toggle = 1;
- priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
- MLX4_HCR_SIZE);
- if (!priv->cmd.hcr) {
- mlx4_err(dev, "Couldn't map command register.");
- return -ENOMEM;
+ priv->cmd.hcr = NULL;
+ priv->mfunc.vhcr = NULL;
+
+ if (!mlx4_is_slave(dev)) {
+ priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
+ MLX4_HCR_BASE, MLX4_HCR_SIZE);
+ if (!priv->cmd.hcr) {
+ mlx4_err(dev, "Couldn't map command register.\n");
+ return -ENOMEM;
+ }
}
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
MLX4_MAILBOX_SIZE,
MLX4_MAILBOX_SIZE, 0);
- if (!priv->cmd.pool) {
+ if (!priv->cmd.pool)
+ goto err_hcr;
+
+ return 0;
+
+err_hcr:
+ if (!mlx4_is_slave(dev))
iounmap(priv->cmd.hcr);
- return -ENOMEM;
+ return -ENOMEM;
+}
+
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, port;
+
+ if (mlx4_is_master(dev)) {
+ flush_workqueue(priv->mfunc.master.comm_wq);
+ destroy_workqueue(priv->mfunc.master.comm_wq);
+ for (i = 0; i < dev->num_slaves; i++) {
+ for (port = 1; port <= MLX4_MAX_PORTS; port++)
+ kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+ }
+ kfree(priv->mfunc.master.slave_state);
}
- return 0;
+ iounmap(priv->mfunc.comm);
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -354,7 +1629,9 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
pci_pool_destroy(priv->cmd.pool);
- iounmap(priv->cmd.hcr);
+
+ if (!mlx4_is_slave(dev))
+ iounmap(priv->cmd.hcr);
}
/*
@@ -365,6 +1642,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ int err = 0;
priv->cmd.context = kmalloc(priv->cmd.max_cmds *
sizeof (struct mlx4_cmd_context),
@@ -389,11 +1667,10 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
; /* nothing */
--priv->cmd.token_mask;
- priv->cmd.use_events = 1;
-
down(&priv->cmd.poll_sem);
+ priv->cmd.use_events = 1;
- return 0;
+ return err;
}
/*
@@ -433,7 +1710,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
-void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox)
{
if (!mailbox)
return;
@@ -442,3 +1720,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
+
+u32 mlx4_comm_get_version(void)
+{
+ return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 499a5168892..7e64033d7de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,9 +34,9 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/export.h>
-#include <linux/gfp.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
@@ -44,27 +44,6 @@
#include "mlx4.h"
#include "icm.h"
-struct mlx4_cq_context {
- __be32 flags;
- u16 reserved1[3];
- __be16 page_offset;
- __be32 logsize_usrpage;
- __be16 cq_period;
- __be16 cq_max_count;
- u8 reserved2[3];
- u8 comp_eqn;
- u8 log_page_size;
- u8 reserved3[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- __be32 last_notified_index;
- __be32 solicit_producer_index;
- __be32 consumer_index;
- __be32 producer_index;
- u32 reserved4[2];
- __be64 db_rec_addr;
-};
-
#define MLX4_CQ_STATUS_OK ( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
@@ -81,7 +60,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
if (!cq) {
- mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
@@ -117,23 +96,24 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
- return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
+ MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num, u32 opmod)
{
return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
- mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
+ cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
@@ -188,6 +168,78 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+ int err;
+
+ *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+ if (*cqn == -1)
+ return -ENOMEM;
+
+ err = mlx4_table_get(dev, &cq_table->table, *cqn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+ if (err)
+ goto err_put;
+ return 0;
+
+err_put:
+ mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+ return err;
+}
+
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
+ else {
+ *cqn = get_param_l(&out_param);
+ return 0;
+ }
+ }
+ return __mlx4_cq_alloc_icm(dev, cqn);
+}
+
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+
+ mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+ mlx4_table_put(dev, &cq_table->table, cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cqn);
+}
+
+static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, cqn);
+ err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+ } else
+ __mlx4_cq_free_icm(dev, cqn);
+}
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
unsigned vector, int collapsed)
@@ -204,23 +256,15 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
cq->vector = vector;
- cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
- if (cq->cqn == -1)
- return -ENOMEM;
-
- err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
- if (err)
- goto err_out;
-
- err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+ err = mlx4_cq_alloc_icm(dev, &cq->cqn);
if (err)
- goto err_put;
+ return err;
spin_lock_irq(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
spin_unlock_irq(&cq_table->lock);
if (err)
- goto err_cmpt_put;
+ goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -259,14 +303,8 @@ err_radix:
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);
-err_cmpt_put:
- mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
- mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
- mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+ mlx4_cq_free_icm(dev, cq->cqn);
return err;
}
@@ -292,8 +330,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
complete(&cq->free);
wait_for_completion(&cq->free);
- mlx4_table_put(dev, &cq_table->table, cq->cqn);
- mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+ mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);
@@ -304,6 +341,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -315,6 +354,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
/* Nothing to do to clean up radix_tree */
mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}
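
The cq.c changes above split CQ number allocation into a native path, which touches the bitmap and ICM tables directly, and a wrapped path, which proxies through the ALLOC_RES/FREE_RES firmware commands when the device is multi-function. A condensed sketch of that dispatch, with stand-in helpers in place of the real firmware calls:

#include <stdbool.h>

/* Hypothetical stand-ins for the two allocation paths. */
static int sketch_alloc_res_cmd(int *cqn)    { *cqn = 42; return 0; }
static int sketch_alloc_icm_direct(int *cqn) { *cqn = 7;  return 0; }

static int sketch_cq_alloc_icm(bool is_mfunc, int *cqn)
{
	if (is_mfunc)			/* slave: ask the master via ALLOC_RES */
		return sketch_alloc_res_cmd(cqn);
	return sketch_alloc_icm_direct(cqn);	/* native: owns the bitmap */
}
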
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 227997d775e..00b81272e31 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,10 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int err;
cq->size = entries;
- if (mode == RX)
- cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
- else
- cq->buf_size = sizeof(struct mlx4_cqe);
+ cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
cq->ring = ring;
cq->is_tx = mode;
@@ -120,7 +117,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->size = priv->rx_ring[cq->ring].actual_size;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
- cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
+ cq->wqres.db.dma, &cq->mcq, cq->vector, 0);
if (err)
return err;
@@ -147,6 +144,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector)
mlx4_release_eq(priv->mdev->dev, cq->vector);
+ cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 74e2a2a8a02..70346fd7f9c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -45,13 +45,16 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- strncpy(drvinfo->driver, DRV_NAME, 32);
- strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
- sprintf(drvinfo->fw_version, "%d.%d.%d",
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
- strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
@@ -103,8 +106,17 @@ static void mlx4_en_get_wol(struct net_device *netdev,
struct mlx4_en_priv *priv = netdev_priv(netdev);
int err = 0;
u64 config = 0;
+ u64 mask;
- if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
+ if ((priv->port < 1) || (priv->port > 2)) {
+ en_err(priv, "Failed to get WoL information\n");
+ return;
+ }
+
+ mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+ MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+ if (!(priv->mdev->dev->caps.flags & mask)) {
wol->supported = 0;
wol->wolopts = 0;
return;
@@ -133,8 +145,15 @@ static int mlx4_en_set_wol(struct net_device *netdev,
struct mlx4_en_priv *priv = netdev_priv(netdev);
u64 config = 0;
int err = 0;
+ u64 mask;
- if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
+ if ((priv->port < 1) || (priv->port > 2))
+ return -EOPNOTSUPP;
+
+ mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+ MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+ if (!(priv->mdev->dev->caps.flags & mask))
return -EOPNOTSUPP;
if (wol->supported & ~WAKE_MAGIC)
@@ -164,10 +183,11 @@ static int mlx4_en_set_wol(struct net_device *netdev,
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ int bit_count = hweight64(priv->stats_bitmap);
switch (sset) {
case ETH_SS_STATS:
- return NUM_ALL_STATS +
+ return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
(priv->tx_ring_num + priv->rx_ring_num) * 2;
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
@@ -182,14 +202,34 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int index = 0;
- int i;
+ int i, j = 0;
spin_lock_bh(&priv->stats_lock);
- for (i = 0; i < NUM_MAIN_STATS; i++)
- data[index++] = ((unsigned long *) &priv->stats)[i];
- for (i = 0; i < NUM_PORT_STATS; i++)
- data[index++] = ((unsigned long *) &priv->port_stats)[i];
+ if (!(priv->stats_bitmap)) {
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ data[index++] =
+ ((unsigned long *) &priv->stats)[i];
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ data[index++] =
+ ((unsigned long *) &priv->port_stats)[i];
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ data[index++] =
+ ((unsigned long *) &priv->pkstats)[i];
+ } else {
+ for (i = 0; i < NUM_MAIN_STATS; i++) {
+ if ((priv->stats_bitmap >> j) & 1)
+ data[index++] =
+ ((unsigned long *) &priv->stats)[i];
+ j++;
+ }
+ for (i = 0; i < NUM_PORT_STATS; i++) {
+ if ((priv->stats_bitmap >> j) & 1)
+ data[index++] =
+ ((unsigned long *) &priv->port_stats)[i];
+ j++;
+ }
+ }
for (i = 0; i < priv->tx_ring_num; i++) {
data[index++] = priv->tx_ring[i].packets;
data[index++] = priv->tx_ring[i].bytes;
@@ -198,8 +238,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
data[index++] = priv->rx_ring[i].packets;
data[index++] = priv->rx_ring[i].bytes;
}
- for (i = 0; i < NUM_PKT_STATS; i++)
- data[index++] = ((unsigned long *) &priv->pkstats)[i];
spin_unlock_bh(&priv->stats_lock);
}
@@ -228,11 +266,29 @@ static void mlx4_en_get_strings(struct net_device *dev,
case ETH_SS_STATS:
/* Add main counters */
- for (i = 0; i < NUM_MAIN_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
- for (i = 0; i< NUM_PORT_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[i + NUM_MAIN_STATS]);
+ if (!priv->stats_bitmap) {
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i]);
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i +
+ NUM_MAIN_STATS]);
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i +
+ NUM_MAIN_STATS +
+ NUM_PORT_STATS]);
+ } else
+ for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
+ if ((priv->stats_bitmap >> i) & 1) {
+ strcpy(data +
+ (index++) * ETH_GSTRING_LEN,
+ main_strings[i]);
+ }
+ if (!(priv->stats_bitmap >> i))
+ break;
+ }
for (i = 0; i < priv->tx_ring_num; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
@@ -245,9 +301,6 @@ static void mlx4_en_get_strings(struct net_device *dev,
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
}
- for (i = 0; i< NUM_PKT_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
break;
}
}
@@ -460,6 +513,95 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
param->tx_pending = priv->tx_ring[0].size;
}
+static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return priv->rx_ring_num;
+}
+
+static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+ int rss_rings;
+ size_t n = priv->rx_ring_num;
+ int err = 0;
+
+ rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
+
+ while (n--) {
+ ring_index[n] = rss_map->qps[n % rss_rings].qpn -
+ rss_map->base_qpn;
+ }
+
+ return err;
+}
+
+static int mlx4_en_set_rxfh_indir(struct net_device *dev,
+ const u32 *ring_index)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int port_up = 0;
+ int err = 0;
+ int i;
+ int rss_rings = 0;
+
+ /* Calculate RSS table size and make sure flows are spread evenly
+ * between rings
+ */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (i > 0 && !ring_index[i] && !rss_rings)
+ rss_rings = i;
+
+ if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
+ return -EINVAL;
+ }
+
+ if (!rss_rings)
+ rss_rings = priv->rx_ring_num;
+
+ /* RSS table size must be an order of 2 */
+ if (!is_power_of_2(rss_rings))
+ return -EINVAL;
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev);
+ }
+
+ priv->prof->rss_rings = rss_rings;
+
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err)
+ en_err(priv, "Failed starting port\n");
+ }
+
+ mutex_unlock(&mdev->state_lock);
+ return err;
+}
+
+static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->rx_ring_num;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@@ -479,6 +621,10 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_pauseparam = mlx4_en_set_pauseparam,
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
+ .get_rxnfc = mlx4_en_get_rxnfc,
+ .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
+ .get_rxfh_indir = mlx4_en_get_rxfh_indir,
+ .set_rxfh_indir = mlx4_en_set_rxfh_indir,
};
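The get/set_rxfh_indir hooks added above enforce one invariant: entry i of the indirection table must equal i % rss_rings, with rss_rings a power of two, so flows stay evenly spread across rings. A standalone sketch of that check (illustrative only, not driver code):

/* Standalone sketch of the RSS indirection-table rule enforced by
 * mlx4_en_set_rxfh_indir above: entry i must equal i % rss_rings,
 * and rss_rings must be a power of two.  Illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static bool indir_table_valid(const unsigned int *tbl,
			      unsigned int n_entries,
			      unsigned int rss_rings)
{
	unsigned int i;

	/* power-of-2 check, same condition as is_power_of_2() */
	if (!rss_rings || (rss_rings & (rss_rings - 1)))
		return false;

	for (i = 0; i < n_entries; i++)
		if (tbl[i] != i % rss_rings)
			return false;
	return true;
}

int main(void)
{
	unsigned int tbl[8];
	unsigned int i;

	for (i = 0; i < 8; i++)
		tbl[i] = i % 4;		/* spread flows over 4 of 8 rings */

	printf("valid: %s\n", indir_table_valid(tbl, 8, 4) ? "yes" : "no");
	return 0;
}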
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index a06096fcc0b..2097a7d3c5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -62,10 +62,6 @@ static const char mlx4_en_version[] =
* Device scope module parameters
*/
-
-/* Enable RSS TCP traffic */
-MLX4_EN_PARM_INT(tcp_rss, 1,
- "Enable RSS for incomming TCP traffic or disabled (0)");
/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
"Enable RSS for incomming UDP traffic or disabled (0)");
@@ -104,7 +100,6 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
struct mlx4_en_profile *params = &mdev->profile;
int i;
- params->tcp_rss = tcp_rss;
params->udp_rss = udp_rss;
if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
@@ -120,6 +115,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+ params->prof[i].rss_rings = 0;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 78d776bc355..149e60da0a3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,7 +45,7 @@
#include "mlx4_en.h"
#include "en_port.h"
-static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -67,9 +67,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "failed adding vlan %d\n", vid);
mutex_unlock(&mdev->state_lock);
+ return 0;
}
-static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -93,6 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "Failed configuring VLAN filter\n");
}
mutex_unlock(&mdev->state_lock);
+
+ return 0;
}
u64 mlx4_en_mac_to_u64(u8 *addr)
@@ -133,7 +136,7 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
if (priv->port_up) {
/* Remove old MAC and insert the new one */
err = mlx4_replace_mac(mdev->dev, priv->port,
- priv->base_qpn, priv->mac, 0);
+ priv->base_qpn, priv->mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
} else
@@ -148,6 +151,7 @@ static void mlx4_en_clear_list(struct net_device *dev)
struct mlx4_en_priv *priv = netdev_priv(dev);
kfree(priv->mc_addrs);
+ priv->mc_addrs = NULL;
priv->mc_addrs_cnt = 0;
}
@@ -167,6 +171,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
i = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+ mlx4_en_clear_list(dev);
priv->mc_addrs = mc_addrs;
priv->mc_addrs_cnt = mc_addrs_cnt;
}
@@ -204,6 +209,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
goto out;
}
+ if (!netif_carrier_ok(dev)) {
+ if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+ if (priv->port_state.link_state) {
+ priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+ netif_carrier_on(dev);
+ en_dbg(LINK, priv, "Link Up\n");
+ }
+ }
+ }
+
/*
	 * Promiscuous mode: disable all filters
*/
@@ -599,12 +614,12 @@ int mlx4_en_start_port(struct net_device *dev)
++rx_index;
}
- /* Set port mac number */
- en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
- err = mlx4_register_mac(mdev->dev, priv->port,
- priv->mac, &priv->base_qpn, 0);
+ /* Set qp number */
+ en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
+ err = mlx4_get_eth_qp(mdev->dev, priv->port,
+ priv->mac, &priv->base_qpn);
if (err) {
- en_err(priv, "Failed setting port mac\n");
+ en_err(priv, "Failed getting eth qp\n");
goto cq_err;
}
mdev->mac_removed[priv->port] = 0;
@@ -687,6 +702,8 @@ int mlx4_en_start_port(struct net_device *dev)
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->mcast_task);
+ mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+
priv->port_up = true;
netif_tx_start_all_queues(dev);
return 0;
@@ -699,7 +716,7 @@ tx_err:
mlx4_en_release_rss_steer(priv);
mac_err:
- mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+ mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -745,10 +762,6 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
- /* Unregister Mac address for the port */
- mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
- mdev->mac_removed[priv->port] = 1;
-
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -762,6 +775,10 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
+ /* Unregister Mac address for the port */
+ mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+ mdev->mac_removed[priv->port] = 1;
+
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -792,37 +809,49 @@ static void mlx4_en_restart(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
-
-static int mlx4_en_open(struct net_device *dev)
+static void mlx4_en_clear_stats(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int i;
- int err = 0;
-
- mutex_lock(&mdev->state_lock);
-
- if (!mdev->device_up) {
- en_err(priv, "Cannot open - device down/disabled\n");
- err = -EBUSY;
- goto out;
- }
- /* Reset HW statistics and performance counters */
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
+ memset(&priv->pkstats, 0, sizeof(priv->pkstats));
+ memset(&priv->port_stats, 0, sizeof(priv->port_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_ring[i].bytes = 0;
priv->tx_ring[i].packets = 0;
+ priv->tx_ring[i].tx_csum = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i].bytes = 0;
priv->rx_ring[i].packets = 0;
+ priv->rx_ring[i].csum_ok = 0;
+ priv->rx_ring[i].csum_none = 0;
}
+}
+
+static int mlx4_en_open(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+
+ mutex_lock(&mdev->state_lock);
+
+ if (!mdev->device_up) {
+ en_err(priv, "Cannot open - device down/disabled\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ /* Reset HW statistics and SW counters */
+ mlx4_en_clear_stats(dev);
err = mlx4_en_start_port(dev);
if (err)
@@ -863,7 +892,8 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
for (i = 0; i < priv->rx_ring_num; i++) {
if (priv->rx_ring[i].rx_info)
- mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
+ mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+ priv->prof->rx_ring_size, priv->stride);
if (priv->rx_cq[i].buf)
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
@@ -974,6 +1004,21 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int mlx4_en_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ if (features & NETIF_F_LOOPBACK)
+ priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ else
+ priv->ctrl_flags &=
+ cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
+ return 0;
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -990,6 +1035,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx4_en_netpoll,
#endif
+ .ndo_set_features = mlx4_en_set_features,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1022,6 +1068,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->port = port;
priv->port_up = false;
priv->flags = prof->flags;
+ priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+ MLX4_WQE_CTRL_SOLICITED);
priv->tx_ring_num = prof->tx_ring_num;
priv->rx_ring_num = prof->rx_ring_num;
priv->mac_index = -1;
@@ -1088,6 +1136,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ dev->hw_features |= NETIF_F_LOOPBACK;
mdev->pndev[port] = dev;
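mlx4_en_set_features above toggles MLX4_WQE_CTRL_FORCE_LOOPBACK inside priv->ctrl_flags, which is kept big-endian so it can be copied straight into each WQE. A minimal sketch of that byte-order-safe toggle (the bit position below is made up for illustration):

/* Sketch of the byte-order-safe flag toggling used by
 * mlx4_en_set_features: keep the word big-endian and convert the
 * mask, never the stored value.  The bit position is hypothetical. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define FORCE_LOOPBACK	(1u << 2)	/* made-up bit for illustration */

int main(void)
{
	uint32_t ctrl = htonl(0);	/* big-endian, like priv->ctrl_flags */

	ctrl |= htonl(FORCE_LOOPBACK);		/* feature switched on */
	printf("on:  0x%08x\n", ntohl(ctrl));

	ctrl &= htonl(~FORCE_LOOPBACK);		/* feature switched off */
	printf("off: 0x%08x\n", ntohl(ctrl));
	return 0;
}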
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 03c84cd78cd..331791467a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -41,13 +41,6 @@
#include "mlx4_en.h"
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
- u64 mac, u64 clear, u8 mode)
-{
- return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
- MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
-
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -72,76 +65,7 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
filter->entry[i] = cpu_to_be32(entry);
}
err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
- MLX4_CMD_TIME_CLASS_B);
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
- u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_general_context *context;
- int err;
- u32 in_mod;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
- context->flags = SET_PORT_GEN_ALL_VALID;
- context->mtu = cpu_to_be16(mtu);
- context->pptx = (pptx * (!pfctx)) << 7;
- context->pfctx = pfctx;
- context->pprx = (pprx * (!pfcrx)) << 7;
- context->pfcrx = pfcrx;
-
- in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
- u8 promisc)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_rqp_calc_context *context;
- int err;
- u32 in_mod;
- u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
- MCAST_DIRECT : MCAST_DEFAULT;
-
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
- return 0;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
- context->base_qpn = cpu_to_be32(base_qpn);
- context->n_mac = dev->caps.log_num_macs;
- context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
- base_qpn);
- context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
- base_qpn);
- context->intra_no_vlan = 0;
- context->no_vlan = MLX4_NO_VLAN_IDX;
- context->intra_vlan_miss = 0;
- context->vlan_miss = MLX4_VLAN_MISS_IDX;
-
- in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
@@ -159,7 +83,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*qport_context));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
- MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err)
goto out;
qport_context = mailbox->buf;
@@ -204,7 +129,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
- MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err)
goto out;
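Every command issued from en_port.c now names a dispatch flavor, MLX4_CMD_WRAPPED or MLX4_CMD_NATIVE. A toy model of what that split means under SRIOV (all names below are stand-ins, not the driver's API):

/* Toy model of the wrapped/native split introduced by this series:
 * wrapped commands from a VF are trapped and executed by the PF on
 * its behalf; native commands always go straight to firmware.
 * Everything below is a stand-in, not driver code. */
#include <stdbool.h>
#include <stdio.h>

enum cmd_flavor { CMD_NATIVE, CMD_WRAPPED };

struct toy_dev { bool is_slave; };

static int fw_execute(const char *op)
{
	printf("FW executes %s\n", op);
	return 0;
}

static int master_wrapper(const char *op)
{
	printf("PF validates and forwards %s\n", op);
	return fw_execute(op);
}

static int toy_cmd(struct toy_dev *dev, const char *op,
		   enum cmd_flavor flavor)
{
	if (flavor == CMD_WRAPPED && dev->is_slave)
		return master_wrapper(op);	/* via the comm channel */
	return fw_execute(op);
}

int main(void)
{
	struct toy_dev vf = { .is_slave = true };

	toy_cmd(&vf, "SET_VLAN_FLTR", CMD_WRAPPED);
	toy_cmd(&vf, "QUERY_FW", CMD_NATIVE);
	return 0;
}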
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 19eb244f516..6934fd7e66e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,49 +39,6 @@
#define SET_PORT_PROMISC_SHIFT 31
#define SET_PORT_MC_PROMISC_SHIFT 30
-enum {
- MLX4_CMD_SET_VLAN_FLTR = 0x47,
- MLX4_CMD_SET_MCAST_FLTR = 0x48,
- MLX4_CMD_DUMP_ETH_STATS = 0x49,
-};
-
-enum {
- MCAST_DIRECT_ONLY = 0,
- MCAST_DIRECT = 1,
- MCAST_DEFAULT = 2
-};
-
-struct mlx4_set_port_general_context {
- u8 reserved[3];
- u8 flags;
- u16 reserved2;
- __be16 mtu;
- u8 pptx;
- u8 pfctx;
- u16 reserved3;
- u8 pprx;
- u8 pfcrx;
- u16 reserved4;
-};
-
-struct mlx4_set_port_rqp_calc_context {
- __be32 base_qpn;
- u8 rererved;
- u8 n_mac;
- u8 n_vlan;
- u8 n_prio;
- u8 reserved2[3];
- u8 mac_miss;
- u8 intra_no_vlan;
- u8 no_vlan;
- u8 intra_vlan_miss;
- u8 vlan_miss;
- u8 reserved3[3];
- u8 no_vlan_prio;
- __be32 promisc;
- __be32 mcast;
-};
-
#define VLAN_FLTR_SIZE 128
struct mlx4_set_vlan_fltr_mbox {
__be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 0dfb4ec8a9d..bcbc54c1694 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
struct mlx4_en_dev *mdev = priv->mdev;
memset(context, 0, sizeof *context);
- context->flags = cpu_to_be32(7 << 16 | rss << 13);
+ context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
context->pd = cpu_to_be32(mdev->priv_pdn);
context->mtu_msgmax = 0xff;
if (!is_tx && !rss)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c2df6c35860..d4ad8c226b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -168,8 +168,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
return 0;
err:
- while (i--)
+ while (i--) {
+ dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
+ pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
+ PCI_DMA_FROMDEVICE);
put_page(skb_frags[i].page);
+ }
return -ENOMEM;
}
@@ -380,12 +384,12 @@ err_allocator:
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
+ struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_en_unmap_buffer(&ring->wqres.buf);
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
}
@@ -541,6 +545,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length;
int polled = 0;
int ip_summed;
+ struct ethhdr *ethh;
+ u64 s_mac;
if (!priv->port_up)
return 0;
@@ -577,6 +583,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
+		/* Get a pointer to the first fragment, since we don't have
+		 * an skb yet, and cast it to an ethhdr struct */
+ ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
+ skb_frags[0].offset);
+ s_mac = mlx4_en_mac_to_u64(ethh->h_source);
+
+		/* Drop the packet if the source MAC is our own and we are
+		 * either not running the selftest or the loopback (flb)
+		 * feature is disabled */
+ if (s_mac == priv->mac &&
+ (!(dev->features & NETIF_F_LOOPBACK) ||
+ !priv->validate_loopback))
+ goto next;
+
/*
* Packet is OK - process it.
*/
@@ -837,9 +856,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
struct mlx4_qp_context context;
- struct mlx4_en_rss_context *rss_context;
+ struct mlx4_rss_context *rss_context;
+ int rss_rings;
void *ptr;
- u8 rss_mask = 0x3f;
+ u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
+ MLX4_RSS_TCP_IPV6);
int i, qpn;
int err = 0;
int good_qps = 0;
@@ -877,18 +898,26 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0].cqn, &context);
- ptr = ((void *) &context) + 0x3c;
+ if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
+ rss_rings = priv->rx_ring_num;
+ else
+ rss_rings = priv->prof->rss_rings;
+
+ ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+ + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
rss_context = ptr;
- rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
+ rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+ if (priv->mdev->profile.udp_rss) {
+ rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
+ rss_context->base_qpn_udp = rss_context->default_qpn;
+ }
rss_context->flags = rss_mask;
- rss_context->hash_fn = 1;
+ rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
rss_context->rss_key[i] = rsskey[i];
- if (priv->mdev->profile.udp_rss)
- rss_context->base_qpn_udp = rss_context->default_qpn;
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
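The new own-MAC drop test in mlx4_en_process_rx_cq compares addresses as u64 values produced by mlx4_en_mac_to_u64. A minimal sketch of that packing, assuming the highest address byte lands in the most significant bits:

/* Minimal sketch of packing a 6-byte Ethernet address into a u64,
 * matching the byte order mlx4_en_mac_to_u64 is assumed to produce
 * (first address byte in the most significant bits). */
#include <stdint.h>
#include <stdio.h>

static uint64_t mac_to_u64(const uint8_t addr[6])
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < 6; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

int main(void)
{
	uint8_t a[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };

	printf("mac = 0x%012llx\n",
	       (unsigned long long)mac_to_u64(a));
	return 0;
}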
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 9fdbcecd499..bf2e5d3f177 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -43,7 +43,7 @@
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index d901b426753..9ef9038d062 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -307,59 +307,60 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
- struct mlx4_cqe *cqe = cq->buf;
+ struct mlx4_cqe *cqe;
u16 index;
- u16 new_index;
+ u16 new_index, ring_index;
u32 txbbs_skipped = 0;
- u32 cq_last_sav;
-
- /* index always points to the first TXBB of the last polled descriptor */
- index = ring->cons & ring->size_mask;
- new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
- if (index == new_index)
- return;
+ u32 cons_index = mcq->cons_index;
+ int size = cq->size;
+ u32 size_mask = ring->size_mask;
+ struct mlx4_cqe *buf = cq->buf;
if (!priv->port_up)
return;
- /*
- * We use a two-stage loop:
- * - the first samples the HW-updated CQE
- * - the second frees TXBBs until the last sample
- * This lets us amortize CQE cache misses, while still polling the CQ
- * until is quiescent.
- */
- cq_last_sav = mcq->cons_index;
- do {
+ index = cons_index & size_mask;
+ cqe = &buf[index];
+ ring_index = ring->cons & size_mask;
+
+ /* Process all completed CQEs */
+ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+ cons_index & size)) {
+ /*
+ * make sure we read the CQE after we read the
+ * ownership bit
+ */
+ rmb();
+
+ /* Skip over last polled CQE */
+ new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+
do {
- /* Skip over last polled CQE */
- index = (index + ring->last_nr_txbb) & ring->size_mask;
txbbs_skipped += ring->last_nr_txbb;
-
- /* Poll next CQE */
+ ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+ /* free next descriptor */
ring->last_nr_txbb = mlx4_en_free_tx_desc(
- priv, ring, index,
- !!((ring->cons + txbbs_skipped) &
- ring->size));
- ++mcq->cons_index;
-
- } while (index != new_index);
+ priv, ring, ring_index,
+ !!((ring->cons + txbbs_skipped) &
+ ring->size));
+ } while (ring_index != new_index);
+
+ ++cons_index;
+ index = cons_index & size_mask;
+ cqe = &buf[index];
+ }
- new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
- } while (index != new_index);
- AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
- (u32) (mcq->cons_index - cq_last_sav));
/*
* To prevent CQ overflow we first update CQ consumer and only then
* the ring consumer.
*/
+ mcq->cons_index = cons_index;
mlx4_cq_set_ci(mcq);
wmb();
ring->cons += txbbs_skipped;
@@ -565,7 +566,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+ (!!vlan_tx_tag_present(skb));
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
@@ -676,27 +678,25 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
	/* Prepare ctrl segment apart from opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+ !!vlan_tx_tag_present(skb);
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
- tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
- MLX4_WQE_CTRL_SOLICITED);
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
MLX4_WQE_CTRL_TCP_UDP_CSUM);
ring->tx_csum++;
}
- if (unlikely(priv->validate_loopback)) {
- /* Copy dst mac address to wqe */
- skb_reset_mac_header(skb);
- ethh = eth_hdr(skb);
- if (ethh && ethh->h_dest) {
- mac = mlx4_en_mac_to_u64(ethh->h_dest);
- mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
- mac_l = (u32) (mac & 0xffffffff);
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
- tx_desc->ctrl.imm = cpu_to_be32(mac_l);
- }
+ /* Copy dst mac address to wqe */
+ skb_reset_mac_header(skb);
+ ethh = eth_hdr(skb);
+ if (ethh && ethh->h_dest) {
+ mac = mlx4_en_mac_to_u64(ethh->h_dest);
+ mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+ mac_l = (u32) (mac & 0xffffffff);
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+ tx_desc->ctrl.imm = cpu_to_be32(mac_l);
}
/* Handle LSO (TSO) packets */
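The reworked TX completion loop polls by CQE ownership instead of chasing wqe_index: a CQE belongs to software while its owner bit matches the wrap parity of the consumer index. A compact standalone sketch of that test (types simplified):

/* Sketch of the CQE ownership test that drives the new completion
 * loop: the owner bit must equal the wrap parity of cons_index for
 * the entry to belong to software.  Types are simplified. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XNOR(x, y)		(!(x) == !(y))
#define CQE_OWNER_MASK		0x80

struct toy_cqe { uint8_t owner_sr_opcode; };

static bool cqe_sw_owned(const struct toy_cqe *cqe,
			 uint32_t cons_index, uint32_t cq_size)
{
	return XNOR(cqe->owner_sr_opcode & CQE_OWNER_MASK,
		    cons_index & cq_size);
}

int main(void)
{
	struct toy_cqe cqe = { .owner_sr_opcode = 0x80 };

	/* first pass through a 64-entry CQ: owner bit set => HW's turn */
	printf("sw owned: %d\n", cqe_sw_owned(&cqe, 0, 64));
	/* after one wrap the same bit value means SW's turn */
	printf("sw owned: %d\n", cqe_sw_owned(&cqe, 64, 64));
	return 0;
}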
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 24ee9677599..9129ace0256 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -52,30 +53,6 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
- __be32 flags;
- u16 reserved1[3];
- __be16 page_offset;
- u8 log_eq_size;
- u8 reserved2[4];
- u8 eq_period;
- u8 reserved3;
- u8 eq_max_count;
- u8 reserved4[3];
- u8 intr;
- u8 log_page_size;
- u8 reserved5[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- u32 reserved6[2];
- __be32 consumer_index;
- __be32 producer_index;
- u32 reserved7[4];
-};
-
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -100,46 +77,9 @@ struct mlx4_eq_context {
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
- (1ull << MLX4_EVENT_TYPE_CMD))
-
-struct mlx4_eqe {
- u8 reserved1;
- u8 type;
- u8 reserved2;
- u8 subtype;
- union {
- u32 raw[6];
- struct {
- __be32 cqn;
- } __packed comp;
- struct {
- u16 reserved1;
- __be16 token;
- u32 reserved2;
- u8 reserved3[3];
- u8 status;
- __be64 out_param;
- } __packed cmd;
- struct {
- __be32 qpn;
- } __packed qp;
- struct {
- __be32 srqn;
- } __packed srq;
- struct {
- __be32 cqn;
- u32 reserved1;
- u8 reserved2[3];
- u8 syndrome;
- } __packed cq_err;
- struct {
- u32 reserved1[2];
- __be32 port;
- } __packed port_change;
- } event;
- u8 reserved3[3];
- u8 owner;
-} __packed;
+ (1ull << MLX4_EVENT_TYPE_CMD) | \
+ (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
+ (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
@@ -162,13 +102,144 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
+static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
+{
+ struct mlx4_eqe *eqe =
+ &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
+ return (!!(eqe->owner & 0x80) ^
+ !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
+ eqe : NULL;
+}
+
+void mlx4_gen_slave_eqe(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work, struct mlx4_mfunc_master_ctx,
+ slave_event_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
+ struct mlx4_eqe *eqe;
+ u8 slave;
+ int i;
+
+ for (eqe = next_slave_event_eqe(slave_eq); eqe;
+ eqe = next_slave_event_eqe(slave_eq)) {
+ slave = eqe->slave_id;
+
+ /* All active slaves need to receive the event */
+ if (slave == ALL_SLAVES) {
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i != dev->caps.function &&
+ master->slave_state[i].active)
+ if (mlx4_GEN_EQE(dev, i, eqe))
+ mlx4_warn(dev, "Failed to "
+ " generate event "
+ "for slave %d\n", i);
+ }
+ } else {
+ if (mlx4_GEN_EQE(dev, slave, eqe))
+ mlx4_warn(dev, "Failed to generate event "
+ "for slave %d\n", slave);
+ }
+ ++slave_eq->cons;
+ }
+}
+
+static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
+ struct mlx4_eqe *s_eqe =
+ &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+
+ if ((!!(s_eqe->owner & 0x80)) ^
+ (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
+ mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
+ "No free EQE on slave events queue\n", slave);
+ return;
+ }
+
+ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+ s_eqe->slave_id = slave;
+	/* ensure all information is written before setting the ownership bit */
+ wmb();
+ s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
+ ++slave_eq->prod;
+
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.slave_event_work);
+}
+
+static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
+ struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave =
+ &priv->mfunc.master.slave_state[slave];
+
+ if (!s_slave->active) {
+ /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+ return;
+ }
+
+ slave_event(dev, slave, eqe);
+}
+
+void mlx4_master_handle_slave_flr(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work, struct mlx4_mfunc_master_ctx,
+ slave_flr_event_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv =
+ container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int i;
+ int err;
+
+ mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
+
+ for (i = 0 ; i < dev->num_slaves; i++) {
+
+ if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
+ mlx4_dbg(dev, "mlx4_handle_slave_flr: "
+ "clean slave: %d\n", i);
+
+ mlx4_delete_all_resources_for_slave(dev, i);
+ /*return the slave to running mode*/
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+ slave_state[i].is_slave_going_down = 0;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ /*notify the FW:*/
+ err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed to notify FW on "
+ "FLR done (slave:%d)\n", i);
+ }
+ }
+}
+
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_eqe *eqe;
int cqn;
int eqes_found = 0;
int set_ci = 0;
int port;
+ int slave = 0;
+ int ret;
+ u32 flr_slave;
+ u8 update_slave_state;
+ int i;
while ((eqe = next_eqe_sw(eq))) {
/*
@@ -191,14 +262,68 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
- mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
- eqe->type);
+ mlx4_dbg(dev, "event %d arrived\n", eqe->type);
+ if (mlx4_is_master(dev)) {
+ /* forward only to slave owning the QP */
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_QP,
+ be32_to_cpu(eqe->event.qp.qpn)
+ & 0xffffff, &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_dbg(dev, "QP event %02x(%02x) on "
+ "EQ %d at index %u: could "
+ "not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+
+ }
+ mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+ 0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
+ mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+ __func__);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
- mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
- eqe->type);
+ if (mlx4_is_master(dev)) {
+ /* forward only to slave owning the SRQ */
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_SRQ,
+ be32_to_cpu(eqe->event.srq.srqn)
+ & 0xffffff,
+ &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_warn(dev, "SRQ event %02x(%02x) "
+ "on EQ %d at index %u: could"
+ " not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
+ " event: %02x(%02x)\n", __func__,
+ slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_warn(dev, "%s: sending event "
+ "%02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+ }
+ mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+ 0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_CMD:
@@ -211,13 +336,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PORT_CHANGE:
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+ mlx4_dispatch_event(dev,
+ MLX4_DEV_EVENT_PORT_DOWN,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+ if (mlx4_is_master(dev))
+				/* change the state of all slaves'
+				 * ports to down: */
+ for (i = 0; i < dev->num_slaves; i++) {
+ mlx4_dbg(dev, "%s: Sending "
+ "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+ " to slave: %d, port:%d\n",
+ __func__, i, port);
+ if (i == dev->caps.function)
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
} else {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+ mlx4_dispatch_event(dev,
+ MLX4_DEV_EVENT_PORT_UP,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+
+ if (mlx4_is_master(dev)) {
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i == dev->caps.function)
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
+ }
}
break;
@@ -226,7 +373,28 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eqe->event.cq_err.syndrome == 1 ?
"overrun" : "access violation",
be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
- mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+ if (mlx4_is_master(dev)) {
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_CQ,
+ be32_to_cpu(eqe->event.cq_err.cqn)
+ & 0xffffff, &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_dbg(dev, "CQ event %02x(%02x) on "
+ "EQ %d at index %u: could "
+ "not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+ }
+ mlx4_cq_event(dev,
+ be32_to_cpu(eqe->event.cq_err.cqn)
+ & 0xffffff,
eqe->type);
break;
@@ -234,13 +402,60 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
+ case MLX4_EVENT_TYPE_COMM_CHANNEL:
+ if (!mlx4_is_master(dev)) {
+ mlx4_warn(dev, "Received comm channel event "
+ "for non master device\n");
+ break;
+ }
+ memcpy(&priv->mfunc.master.comm_arm_bit_vector,
+ eqe->event.comm_channel_arm.bit_vec,
+ sizeof eqe->event.comm_channel_arm.bit_vec);
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.comm_work);
+ break;
+
+ case MLX4_EVENT_TYPE_FLR_EVENT:
+ flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
+ if (!mlx4_is_master(dev)) {
+ mlx4_warn(dev, "Non-master function received"
+ "FLR event\n");
+ break;
+ }
+
+ mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
+
+ if (flr_slave > dev->num_slaves) {
+ mlx4_warn(dev,
+ "Got FLR for unknown function: %d\n",
+ flr_slave);
+ update_slave_state = 0;
+ } else
+ update_slave_state = 1;
+
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (update_slave_state) {
+ priv->mfunc.master.slave_state[flr_slave].active = false;
+ priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
+ priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
+ }
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.slave_flr_event_work);
+ break;
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
- mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
- eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
+ "index %u. owner=%x, nent=0x%x, slave=%x, "
+ "ownership=%s\n",
+ eqe->type, eqe->subtype, eq->eqn,
+ eq->cons_index, eqe->owner, eq->nent,
+ eqe->slave_id,
+ !!(eqe->owner & 0x80) ^
+ !!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
- }
+ };
++eq->cons_index;
eqes_found = 1;
@@ -290,25 +505,55 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq_info *event_eq =
+ priv->mfunc.master.slave_state[slave].event_eq;
+ u32 in_modifier = vhcr->in_modifier;
+ u32 eqn = in_modifier & 0x1FF;
+ u64 in_param = vhcr->in_param;
+ int err = 0;
+ int i;
+
+ if (slave == dev->caps.function)
+ err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+ if (!err)
+ for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
+ if (in_param & (1LL << i))
+ event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;
+
+ return err;
+}
+
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
- 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
- return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
+ MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
+ 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -570,8 +815,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
int err;
int i;
- priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map,
- mlx4_num_eq_uar(dev), GFP_KERNEL);
+ priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
+ sizeof *priv->eq_table.uar_map,
+ GFP_KERNEL);
if (!priv->eq_table.uar_map) {
err = -ENOMEM;
goto err_out_free;
@@ -585,14 +831,16 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
priv->eq_table.uar_map[i] = NULL;
- err = mlx4_map_clr_int(dev);
- if (err)
- goto err_out_bitmap;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_map_clr_int(dev);
+ if (err)
+ goto err_out_bitmap;
- priv->eq_table.clr_mask =
- swab32(1 << (priv->eq_table.inta_pin & 31));
- priv->eq_table.clr_int = priv->clr_base +
- (priv->eq_table.inta_pin < 32 ? 4 : 0);
+ priv->eq_table.clr_mask =
+ swab32(1 << (priv->eq_table.inta_pin & 31));
+ priv->eq_table.clr_int = priv->clr_base +
+ (priv->eq_table.inta_pin < 32 ? 4 : 0);
+ }
priv->eq_table.irq_names =
kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
@@ -700,7 +948,8 @@ err_out_unmap:
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
--i;
}
- mlx4_unmap_clr_int(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_unmap_clr_int(dev);
mlx4_free_irqs(dev);
err_out_bitmap:
@@ -725,7 +974,8 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- mlx4_unmap_clr_int(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_unmap_clr_int(dev);
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
if (priv->eq_table.uar_map[i])
@@ -748,7 +998,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
err = mlx4_NOP(dev);
/* When not in MSI_X, there is only one irq to check */
- if (!(dev->flags & MLX4_FLAG_MSI_X))
+ if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
return err;
/* A loop over all completion vectors, for each vector we will check
@@ -786,7 +1036,7 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
struct mlx4_priv *priv = mlx4_priv(dev);
int vec = 0, err = 0, i;
- spin_lock(&priv->msix_ctl.pool_lock);
+ mutex_lock(&priv->msix_ctl.pool_lock);
for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
if (~priv->msix_ctl.pool_bm & 1ULL << i) {
priv->msix_ctl.pool_bm |= 1ULL << i;
@@ -808,7 +1058,7 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
eq_set_ci(&priv->eq_table.eq[vec], 1);
}
}
- spin_unlock(&priv->msix_ctl.pool_lock);
+ mutex_unlock(&priv->msix_ctl.pool_lock);
if (vec) {
*vector = vec;
@@ -829,13 +1079,13 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
if (likely(i >= 0)) {
		/* sanity check, making sure we're not trying to free
		 * IRQs belonging to a legacy EQ */
- spin_lock(&priv->msix_ctl.pool_lock);
+ mutex_lock(&priv->msix_ctl.pool_lock);
if (priv->msix_ctl.pool_bm & 1ULL << i) {
free_irq(priv->eq_table.eq[vec].irq,
&priv->eq_table.eq[vec]);
priv->msix_ctl.pool_bm &= ~(1ULL << i);
}
- spin_unlock(&priv->msix_ctl.pool_lock);
+ mutex_unlock(&priv->msix_ctl.pool_lock);
}
}
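slave_event above publishes EQEs to a software ring with the same owner-bit convention the hardware uses: write the entry, barrier, then flip the owner byte according to the producer index's wrap parity. A rough single-threaded model (the wmb() appears only as a comment; sizes and types are simplified):

/* Rough model of the slave event ring added in this patch: the
 * producer writes the entry, issues a write barrier, then sets the
 * owner byte from the wrap parity of the producer index; a full ring
 * is detected when the next slot still belongs to the consumer.
 * Simplified and single-threaded for illustration. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE	32	/* stands in for SLAVE_EVENT_EQ_SIZE */

struct toy_eqe { uint8_t payload[31]; uint8_t owner; };

static struct toy_eqe ring[RING_SIZE];
static uint32_t prod, cons;

static int produce(const uint8_t *payload)
{
	struct toy_eqe *e = &ring[prod & (RING_SIZE - 1)];

	/* slot still owned by the other side -> queue full */
	if (!!(e->owner & 0x80) ^ !!(prod & RING_SIZE))
		return -1;

	memcpy(e->payload, payload, sizeof(e->payload));
	/* wmb() would go here: publish the data before the owner byte */
	e->owner = (prod & RING_SIZE) ? 0x00 : 0x80;
	prod++;
	return 0;
}

int main(void)
{
	uint8_t data[31] = { 0 };

	printf("produce: %d (prod=%u cons=%u)\n",
	       produce(data), prod, cons);
	return 0;
}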
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 435ca6e4973..9ea7cabcaf3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -32,6 +32,7 @@
* SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>
@@ -48,7 +49,7 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static int enable_qos;
+static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
@@ -139,12 +140,178 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u8 field;
+ u32 size;
+ int err = 0;
+
+#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
+#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
+#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
+#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
+
+#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
+#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+
+ if (vhcr->op_modifier == 1) {
+ field = vhcr->in_modifier;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+ field = 0; /* ensure fvl bit is not set */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ } else if (vhcr->op_modifier == 0) {
+ field = 1 << 7; /* enable only ethernet interface */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
+
+ field = dev->caps.num_ports;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+
+		size = 0; /* no PF behaviour is set for now */
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+
+ size = dev->caps.num_qps;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+
+ size = dev->caps.num_srqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+
+ size = dev->caps.num_cqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+
+ size = dev->caps.num_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+
+ size = dev->caps.reserved_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+
+ size = dev->caps.num_mpts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+
+ size = dev->caps.num_mtts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+
+ size = dev->caps.num_mgms + dev->caps.num_amgms;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+
+ } else
+ err = -EINVAL;
+
+ return err;
+}
+
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u8 field;
+ u32 size;
+ int i;
+ int err = 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ outbox = mailbox->buf;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+ if (!(field & (1 << 7))) {
+ mlx4_err(dev, "The host doesn't support eth interface\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+ func_cap->num_ports = field;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+ func_cap->pf_context_behaviour = size;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ func_cap->qp_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ func_cap->srq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ func_cap->cq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+ func_cap->max_eq = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ func_cap->reserved_eq = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+
+ for (i = 1; i <= func_cap->num_ports; ++i) {
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
+ MLX4_CMD_QUERY_FUNC_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ if (field & (1 << 7)) {
+ mlx4_err(dev, "VLAN is enforced on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ if (field & (1 << 6)) {
+ mlx4_err(dev, "Force mac is enabled on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+ func_cap->physical_port[i] = field;
+ }
+
+ /* All other resources are allocated by the master, but we still report
+ * 'num' and 'reserved' capabilities as follows:
+ * - num remains the maximum resource index
+ * - 'num - reserved' is the total available objects of a resource, but
+ * resource indices may be less than 'reserved'
+ * TODO: set per-resource quotas */
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -229,7 +396,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
if (err)
goto out;
@@ -396,12 +563,15 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
if (err)
goto out;
MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
dev_cap->supported_port_types[i] = field & 3;
+ dev_cap->suggested_type[i] = (field >> 3) & 1;
+ dev_cap->default_sense[i] = (field >> 4) & 1;
MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
dev_cap->ib_mtu[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
@@ -470,6 +640,61 @@ out:
return err;
}
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u64 def_mac;
+ u8 port_type;
+ int err;
+
+#define MLX4_PORT_SUPPORT_IB (1 << 0)
+#define MLX4_PORT_SUGGEST_TYPE (1 << 3)
+#define MLX4_PORT_DEFAULT_SENSE (1 << 4)
+#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \
+ ~MLX4_PORT_SUGGEST_TYPE & \
+ ~MLX4_PORT_DEFAULT_SENSE)
+
+ err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ if (!err && dev->caps.function != slave) {
+ /* set slave default_mac address */
+ MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+ def_mac += slave << 8;
+ MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
+
+ /* get port type - currently only eth is enabled */
+ MLX4_GET(port_type, outbox->buf,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+ /* Allow only Eth port, no link sensing allowed */
+ port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
+
+ /* check eth is enabled for this port */
+ if (!(port_type & 2))
+ mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
+
+ MLX4_PUT(outbox->buf, port_type,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+ }
+
+ return err;
+}
+
+int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port)
+{
+ struct mlx4_cmd_mailbox *outbox = ptr;
+
+ return mlx4_cmd_box(dev, 0, outbox->dma, port, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT);
+
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -519,7 +744,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
if (++nent == MLX4_MAILBOX_SIZE / 16) {
err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
if (err)
goto out;
nent = 0;
@@ -528,7 +754,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
}
if (nent)
- err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+ err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -557,13 +784,15 @@ int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_RUN_FW(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
int mlx4_QUERY_FW(struct mlx4_dev *dev)
@@ -579,6 +808,7 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_OUT_SIZE 0x100
#define QUERY_FW_VER_OFFSET 0x00
+#define QUERY_FW_PPF_ID 0x09
#define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
#define QUERY_FW_MAX_CMD_OFFSET 0x0f
#define QUERY_FW_ERR_START_OFFSET 0x30
@@ -589,13 +819,16 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
+#define QUERY_FW_COMM_BASE_OFFSET 0x40
+#define QUERY_FW_COMM_BAR_OFFSET 0x48
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -608,6 +841,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
+ MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+ dev->caps.function = lg;
+
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
@@ -649,6 +885,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
+ MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
+ MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
+ fw->comm_bar = (fw->comm_bar >> 6) * 2;
+ mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
+ fw->comm_bar, fw->comm_base);
mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
/*
@@ -711,7 +952,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -743,6 +984,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
+#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
@@ -831,10 +1073,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* UAR attributes */
- MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
- err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
+ MLX4_CMD_NATIVE);
if (err)
mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -843,6 +1086,101 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
return err;
}
+int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ struct mlx4_init_hca_param *param)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ __be32 *outbox;
+ int err;
+
+#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+ MLX4_CMD_QUERY_HCA,
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
+ if (err)
+ goto out;
+
+ MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
+
+ /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+ MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
+ MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
+ MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+ MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
+ MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+ MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+ MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+ MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+
+ /* multicast attributes */
+
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_hash_sz, outbox,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+ /* TPT attributes */
+
+ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
+ MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+ /* UAR attributes */
+
+ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port = vhcr->in_modifier;
+ int err;
+
+ if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
+ return 0;
+
+ if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+ return -ENODEV;
+
+ /* Enable port only if it was previously disabled */
+ if (!priv->mfunc.master.init_port_ref[port]) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ priv->mfunc.master.slave_state[slave].init_port_mask |=
+ (1 << port);
+ }
+ ++priv->mfunc.master.init_port_ref[port];
+ return 0;
+}
+
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -886,33 +1224,62 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
} else
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port = vhcr->in_modifier;
+ int err;
+
+ if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
+ (1 << port)))
+ return 0;
+
+ if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+ return -ENODEV;
+ if (priv->mfunc.master.init_port_ref[port] == 1) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ }
+ priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+ --priv->mfunc.master.init_port_ref[port];
+ return 0;
+}
+
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
- return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+ return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+ MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
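The two wrappers above implement first-open/last-close semantics on the master: the physical INIT_PORT fires only when init_port_ref goes from 0 to 1, CLOSE_PORT only when it drops back from 1 to 0, and a per-slave bit mask keeps each slave's open and close idempotent. A compact sketch of that pattern, with illustrative names and the real command invocations reduced to comments:

    /* sketch only: refcounted port open/close as done by the wrappers */
    struct port_state {
        int refcnt;            /* number of slaves holding the port open */
        unsigned long slaves;  /* one bit per slave that opened it */
    };

    static int port_open(struct port_state *p, int slave)
    {
        if (p->slaves & (1UL << slave))
            return 0;             /* already open for this slave */
        if (!p->refcnt) {
            /* first opener: issue the real INIT_PORT command here */
        }
        p->slaves |= 1UL << slave;
        p->refcnt++;
        return 0;
    }

    static void port_close(struct port_state *p, int slave)
    {
        if (!(p->slaves & (1UL << slave)))
            return;               /* this slave never opened the port */
        if (p->refcnt == 1) {
            /* last closer: issue the real CLOSE_PORT command here */
        }
        p->slaves &= ~(1UL << slave);
        p->refcnt--;
    }
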
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
- return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+ return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
+ MLX4_CMD_NATIVE);
}
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
MLX4_CMD_SET_ICM_SIZE,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (ret)
return ret;
@@ -929,7 +1296,7 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
int mlx4_NOP(struct mlx4_dev *dev)
{
/* Input modifier of 0x1f means "finish as soon as possible." */
- return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+ return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
@@ -938,7 +1305,8 @@ int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
- MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);
@@ -947,6 +1315,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index bf5ec228652..e1a5fa56bcb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -111,11 +111,29 @@ struct mlx4_dev_cap {
u64 max_icm_sz;
int max_gso_sz;
u8 supported_port_types[MLX4_MAX_PORTS + 1];
+ u8 suggested_type[MLX4_MAX_PORTS + 1];
+ u8 default_sense[MLX4_MAX_PORTS + 1];
u8 log_max_macs[MLX4_MAX_PORTS + 1];
u8 log_max_vlans[MLX4_MAX_PORTS + 1];
u32 max_counters;
};
+struct mlx4_func_cap {
+ u8 num_ports;
+ u8 flags;
+ u32 pf_context_behaviour;
+ int qp_quota;
+ int cq_quota;
+ int srq_quota;
+ int mpt_quota;
+ int mtt_quota;
+ int max_eq;
+ int reserved_eq;
+ int mcg_quota;
+ u8 physical_port[MLX4_MAX_PORTS + 1];
+ u8 port_flags[MLX4_MAX_PORTS + 1];
+};
+
struct mlx4_adapter {
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
@@ -133,6 +151,7 @@ struct mlx4_init_hca_param {
u64 dmpt_base;
u64 cmpt_base;
u64 mtt_base;
+ u64 global_caps;
u16 log_mc_entry_sz;
u16 log_mc_hash_sz;
u8 log_num_qps;
@@ -143,6 +162,7 @@ struct mlx4_init_hca_param {
u8 log_mc_table_sz;
u8 log_mpt_sz;
u8 log_uar_sz;
+ u8 uar_page_sz; /* log pg sz in 4k chunks */
};
struct mlx4_init_ib_param {
@@ -167,12 +187,19 @@ struct mlx4_set_ib_param {
};
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_FA(struct mlx4_dev *dev);
int mlx4_RUN_FW(struct mlx4_dev *dev);
int mlx4_QUERY_FW(struct mlx4_dev *dev);
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 02393fdf44c..a9ade1c3cad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -213,7 +213,7 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
@@ -223,7 +223,8 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index ca6feb55bd9..b4e9f6f5cc0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -142,7 +142,8 @@ int mlx4_register_device(struct mlx4_dev *dev)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
- mlx4_start_catas_poll(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_start_catas_poll(dev);
return 0;
}
@@ -152,7 +153,8 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
- mlx4_stop_catas_poll(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 94bbc85a532..d498f049c74 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -40,6 +40,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#include <linux/delay.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -75,21 +76,42 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
+static int num_vfs;
+module_param(num_vfs, int, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+
+static int probe_vf;
+module_param(probe_vf, int, 0644);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+
+int mlx4_log_num_mgm_entry_size = 10;
+module_param_named(log_num_mgm_entry_size,
+ mlx4_log_num_mgm_entry_size, int, 0444);
+MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the"
+ " number of QPs per MCG, for example:"
+ " 10 gives 248. Range: 9 <="
+ " log_num_mgm_entry_size <= 12");
+
+#define MLX4_VF (1 << 0)
+
+#define HCA_GLOBAL_CAP_MASK 0
+#define PF_CONTEXT_BEHAVIOUR_MASK 0
+
static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
- .num_qp = 1 << 17,
+ .num_qp = 1 << 18,
.num_srq = 1 << 16,
.rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
- .num_mpt = 1 << 17,
- .num_mtt = 1 << 20,
+ .num_mpt = 1 << 19,
+ .num_mtt = 1 << 20, /* It is really num mtt segments */
};
-static int log_num_mac = 2;
+static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
@@ -99,15 +121,33 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
-static int use_prio;
+static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
"(0/1, default 0)");
-static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
+static int arr_argc = 2;
+module_param_array(port_type_array, int, &arr_argc, 0444);
+MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
+ "1 for IB, 2 for Ethernet");
+
+struct mlx4_port_config {
+ struct list_head list;
+ enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+ struct pci_dev *pdev;
+};
+
+static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
+{
+ return dev->caps.reserved_eqs +
+ MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
+}
+
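mlx4_master_get_num_eqs() above sizes the EQ space for the master plus every slave. A worked example under assumed values follows (the per-function EQ count and the firmware reservation are placeholders, not taken from this patch):

    #include <stdio.h>

    static unsigned int roundup_pow2(unsigned int x)
    {
        unsigned int r = 1;
        while (r < x)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        unsigned int reserved_eqs = 16; /* assumed FW reservation */
        unsigned int eqs_per_func = 4;  /* assumed MLX4_MFUNC_EQ_NUM */
        unsigned int num_slaves = 7;    /* e.g. seven VFs */

        /* master provisions for itself (+1) and all slaves, then the
         * ICM tables round the count up to a power of two */
        unsigned int eqs = reserved_eqs + eqs_per_func * (num_slaves + 1);
        printf("EQ ICM entries: %u\n", roundup_pow2(eqs)); /* prints 64 */
        return 0;
    }
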
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -140,10 +180,8 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
int i;
- dev->caps.port_mask = 0;
for (i = 1; i <= dev->caps.num_ports; ++i)
- if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
- dev->caps.port_mask |= 1 << (i - 1);
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
}
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
@@ -188,12 +226,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
dev->caps.def_mac[i] = dev_cap->def_mac[i];
dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+ dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
+ dev->caps.default_sense[i] = dev_cap->default_sense[i];
dev->caps.trans_type[i] = dev_cap->trans_type[i];
dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
dev->caps.wavelength[i] = dev_cap->wavelength[i];
dev->caps.trans_code[i] = dev_cap->trans_code[i];
}
+ dev->caps.uar_page_size = PAGE_SIZE;
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
dev->caps.bf_reg_size = dev_cap->bf_reg_size;
@@ -207,7 +248,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
- dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
@@ -216,17 +257,18 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
dev->caps.reserved_cqs = dev_cap->reserved_cqs;
dev->caps.reserved_eqs = dev_cap->reserved_eqs;
- dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
- dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
- dev->caps.mtts_per_seg);
+ dev->caps.reserved_mtts = dev_cap->reserved_mtts;
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
- dev->caps.reserved_uars = dev_cap->reserved_uars;
+
+ /* The first 128 UARs are used for EQ doorbells */
+ dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
dev->caps.reserved_pds = dev_cap->reserved_pds;
dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->reserved_xrcds : 0;
dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->max_xrcds : 0;
- dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+ dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
+
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
@@ -235,18 +277,70 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
+ /* Port sensing is always allowed on supported ConnectX-1 and ConnectX-2 devices */
+ if (dev->pdev->device != 0x1003)
+ dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+
dev->caps.log_num_macs = log_num_mac;
dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) {
- if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
- dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
- else
- dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
- dev->caps.possible_type[i] = dev->caps.port_type[i];
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
+ if (dev->caps.supported_type[i]) {
+ /* if only ETH is supported - assign ETH */
+ if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ /* if only IB is supported,
+ * assign IB only if SRIOV is off */
+ else if (dev->caps.supported_type[i] ==
+ MLX4_PORT_TYPE_IB) {
+ if (dev->flags & MLX4_FLAG_SRIOV)
+ dev->caps.port_type[i] =
+ MLX4_PORT_TYPE_NONE;
+ else
+ dev->caps.port_type[i] =
+ MLX4_PORT_TYPE_IB;
+ /* if IB and ETH are supported,
+ * first of all check if SRIOV is on */
+ } else if (dev->flags & MLX4_FLAG_SRIOV)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ else {
+ /* In non-SRIOV mode, we set the port type
+ * according to the user's selection of port type;
+ * if the user selected none, take the FW hint */
+ if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
+ dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
+ MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
+ else
+ dev->caps.port_type[i] = port_type_array[i-1];
+ }
+ }
+ /*
+ * Link sensing is allowed on the port if 3 conditions are true:
+ * 1. Both protocols are supported on the port.
+ * 2. Different types are supported on the port.
+ * 3. FW declared that it supports link sensing.
+ */
mlx4_priv(dev)->sense.sense_allowed[i] =
- dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
+ ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
+ (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+ (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
+
+ /*
+ * If "default_sense" bit is set, we move the port to "AUTO" mode
+ * and perform sense_port FW command to try and set the correct
+ * port type from the beginning
+ */
+ if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
+ enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
+ dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
+ mlx4_SENSE_PORT(dev, i, &sensed_port);
+ if (sensed_port != MLX4_PORT_TYPE_NONE)
+ dev->caps.port_type[i] = sensed_port;
+ } else {
+ dev->caps.possible_type[i] = dev->caps.port_type[i];
+ }
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -262,8 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- mlx4_set_port_mask(dev);
-
dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
@@ -282,6 +374,148 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
+/* Check whether there are live VFs and return how many there are */
+static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state;
+ int i;
+ int ret = 0;
+
+ for (i = 1 /* slave 0 is the PPF */; i < dev->num_slaves; ++i) {
+ s_state = &priv->mfunc.master.slave_state[i];
+ if (s_state->active && s_state->last_cmd !=
+ MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "%s: slave: %d is still active\n",
+ __func__, i);
+ ret++;
+ }
+ }
+ return ret;
+}
+
+static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave;
+
+ if (!mlx4_is_master(dev))
+ return 0;
+
+ s_slave = &priv->mfunc.master.slave_state[slave];
+ return !!s_slave->active;
+}
+EXPORT_SYMBOL(mlx4_is_slave_active);
+
+static int mlx4_slave_cap(struct mlx4_dev *dev)
+{
+ int err;
+ u32 page_size;
+ struct mlx4_dev_cap dev_cap;
+ struct mlx4_func_cap func_cap;
+ struct mlx4_init_hca_param hca_param;
+ int i;
+
+ memset(&hca_param, 0, sizeof(hca_param));
+ err = mlx4_QUERY_HCA(dev, &hca_param);
+ if (err) {
+ mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+ return err;
+ }
+
+ /* fail if the HCA has an unknown capability */
+ if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
+ HCA_GLOBAL_CAP_MASK) {
+ mlx4_err(dev, "Unknown hca global capabilities\n");
+ return -ENOSYS;
+ }
+
+ mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+
+ memset(&dev_cap, 0, sizeof(dev_cap));
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ page_size = ~dev->caps.page_size_cap + 1;
+ mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
+ if (page_size > PAGE_SIZE) {
+ mlx4_err(dev, "HCA minimum page size of %d bigger than "
+ "kernel PAGE_SIZE of %ld, aborting.\n",
+ page_size, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ /* slave gets uar page size from QUERY_HCA fw command */
+ dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+
+ /* TODO: relax this assumption */
+ if (dev->caps.uar_page_size != PAGE_SIZE) {
+ mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
+ dev->caps.uar_page_size, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
+ PF_CONTEXT_BEHAVIOUR_MASK) {
+ mlx4_err(dev, "Unknown pf context behaviour\n");
+ return -ENOSYS;
+ }
+
+ dev->caps.num_ports = func_cap.num_ports;
+ dev->caps.num_qps = func_cap.qp_quota;
+ dev->caps.num_srqs = func_cap.srq_quota;
+ dev->caps.num_cqs = func_cap.cq_quota;
+ dev->caps.num_eqs = func_cap.max_eq;
+ dev->caps.reserved_eqs = func_cap.reserved_eq;
+ dev->caps.num_mpts = func_cap.mpt_quota;
+ dev->caps.num_mtts = func_cap.mtt_quota;
+ dev->caps.num_pds = MLX4_NUM_PDS;
+ dev->caps.num_mgms = 0;
+ dev->caps.num_amgms = 0;
+
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
+
+ if (dev->caps.num_ports > MLX4_MAX_PORTS) {
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+ "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+ return -ENODEV;
+ }
+
+ if (dev->caps.uar_page_size * (dev->caps.num_uars -
+ dev->caps.reserved_uars) >
+ pci_resource_len(dev->pdev, 2)) {
+ mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
+ "PCI resource 2 size of 0x%llx, aborting.\n",
+ dev->caps.uar_page_size * dev->caps.num_uars,
+ (unsigned long long) pci_resource_len(dev->pdev, 2));
+ return -ENODEV;
+ }
+
+#if 0
+ mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
+ mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
+ dev->caps.num_uars, dev->caps.reserved_uars,
+ dev->caps.uar_page_size * dev->caps.num_uars,
+ pci_resource_len(dev->pdev, 2));
+ mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
+ dev->caps.reserved_eqs);
+ mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
+ dev->caps.num_pds, dev->caps.reserved_pds,
+ dev->caps.slave_pd_shift, dev->caps.pd_base);
+#endif
+ return 0;
+}
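The two rejection tests in mlx4_slave_cap() use the same idiom: OR the value the other side reported with the mask of bits this driver understands, and if the result is not the mask itself, an unknown bit was set. A self-contained sketch of that check:

    #include <stdbool.h>
    #include <stdint.h>

    /* bits this driver version knows how to handle; both masks in the
     * patch are currently 0, i.e. no extra capability bits are expected */
    #define KNOWN_CAPS_MASK 0x0ULL

    static bool caps_are_supported(uint64_t reported)
    {
        /* any bit outside KNOWN_CAPS_MASK survives the OR and trips this */
        return (reported | KNOWN_CAPS_MASK) == KNOWN_CAPS_MASK;
    }
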
/*
* Change the port configuration of the device.
@@ -297,15 +531,14 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
for (port = 0; port < dev->caps.num_ports; port++) {
/* Change the port type only if the new type is different
* from the current, and not set to Auto */
- if (port_types[port] != dev->caps.port_type[port + 1]) {
+ if (port_types[port] != dev->caps.port_type[port + 1])
change = 1;
- dev->caps.port_type[port + 1] = port_types[port];
- }
}
if (change) {
mlx4_unregister_device(dev);
for (port = 1; port <= dev->caps.num_ports; port++) {
mlx4_CLOSE_PORT(dev, port);
+ dev->caps.port_type[port] = port_types[port - 1];
err = mlx4_SET_PORT(dev, port);
if (err) {
mlx4_err(dev, "Failed to set port %d, "
@@ -377,7 +610,8 @@ static ssize_t set_port_type(struct device *dev,
types[i] = mdev->caps.port_type[i+1];
}
- if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+ !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
for (i = 1; i <= mdev->caps.num_ports; i++) {
if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
mdev->caps.possible_type[i] = mdev->caps.port_type[i];
@@ -451,6 +685,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ int num_eqs;
err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
cmpt_base +
@@ -480,12 +715,14 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
if (err)
goto err_srq;
+ num_eqs = (mlx4_is_master(dev)) ?
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_EQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
- cmpt_entry_sz,
- dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+ cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
if (err)
goto err_cq;
@@ -509,6 +746,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
{
struct mlx4_priv *priv = mlx4_priv(dev);
u64 aux_pages;
+ int num_eqs;
int err;
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
@@ -540,10 +778,13 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
goto err_unmap_aux;
}
+
+ num_eqs = (mlx4_is_master(dev)) ?
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
- dev->caps.num_eqs, dev->caps.num_eqs,
- 0, 0);
+ num_eqs, num_eqs, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
goto err_unmap_cmpt;
@@ -563,7 +804,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
init_hca->mtt_base,
dev->caps.mtt_entry_sz,
- dev->caps.num_mtt_segs,
+ dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0);
if (err) {
mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
@@ -650,7 +891,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
* and it's a lot easier than trying to track ref counts.
*/
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
- init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+ init_hca->mc_base,
+ mlx4_get_mgm_entry_size(dev),
dev->caps.num_mgms + dev->caps.num_amgms,
dev->caps.num_mgms + dev->caps.num_amgms,
0, 0);
@@ -726,6 +968,16 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
+static void mlx4_slave_exit(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ down(&priv->cmd.slave_sem);
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+ mlx4_warn(dev, "Failed to close slave function.\n");
+ up(&priv->cmd.slave_sem);
+}
+
static int map_bf_area(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -733,8 +985,13 @@ static int map_bf_area(struct mlx4_dev *dev)
resource_size_t bf_len;
int err = 0;
- bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
- bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+ if (!dev->caps.bf_reg_size)
+ return -ENXIO;
+
+ bf_start = pci_resource_start(dev->pdev, 2) +
+ (dev->caps.num_uars << PAGE_SHIFT);
+ bf_len = pci_resource_len(dev->pdev, 2) -
+ (dev->caps.num_uars << PAGE_SHIFT);
priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
if (!priv->bf_mapping)
err = -ENOMEM;
@@ -751,10 +1008,81 @@ static void unmap_bf_area(struct mlx4_dev *dev)
static void mlx4_close_hca(struct mlx4_dev *dev)
{
unmap_bf_area(dev);
- mlx4_CLOSE_HCA(dev, 0);
- mlx4_free_icms(dev);
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+ if (mlx4_is_slave(dev))
+ mlx4_slave_exit(dev);
+ else {
+ mlx4_CLOSE_HCA(dev, 0);
+ mlx4_free_icms(dev);
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+ }
+}
+
+static int mlx4_init_slave(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u64 dma = (u64) priv->mfunc.vhcr_dma;
+ int num_of_reset_retries = NUM_OF_RESET_RETRIES;
+ int ret_from_reset = 0;
+ u32 slave_read;
+ u32 cmd_channel_ver;
+
+ down(&priv->cmd.slave_sem);
+ priv->cmd.max_cmds = 1;
+ mlx4_warn(dev, "Sending reset\n");
+ ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
+ MLX4_COMM_TIME);
+ /* if we are in the middle of FLR, the slave will try
+ * NUM_OF_RESET_RETRIES times before giving up. */
+ if (ret_from_reset) {
+ if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
+ msleep(SLEEP_TIME_IN_RESET);
+ while (ret_from_reset && num_of_reset_retries) {
+ mlx4_warn(dev, "slave is currently in the"
+ "middle of FLR. retrying..."
+ "(try num:%d)\n",
+ (NUM_OF_RESET_RETRIES -
+ num_of_reset_retries + 1));
+ ret_from_reset =
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
+ 0, MLX4_COMM_TIME);
+ num_of_reset_retries = num_of_reset_retries - 1;
+ }
+ } else
+ goto err;
+ }
+
+ /* check the driver version - the slave I/F revision
+ * must match the master's */
+ slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+ cmd_channel_ver = mlx4_comm_get_version();
+
+ if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
+ MLX4_COMM_GET_IF_REV(slave_read)) {
+ mlx4_err(dev, "slave driver version is not supported"
+ " by the master\n");
+ goto err;
+ }
+
+ mlx4_warn(dev, "Sending vhcr0\n");
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+ goto err;
+ up(&priv->cmd.slave_sem);
+ return 0;
+
+err:
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+ up(&priv->cmd.slave_sem);
+ return -EIO;
}
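mlx4_init_slave() publishes the 64-bit VHCR DMA address to the master through a comm-channel register that only carries a narrow parameter, so the address goes out in 16-bit pieces, high word first, with the final VHCR_EN write carrying the low bits and arming the channel. A sketch of that split (command names and the channel write are stand-ins):

    #include <stdint.h>

    enum { CMD_VHCR0, CMD_VHCR1, CMD_VHCR2, CMD_VHCR_EN };

    /* stand-in for the real comm-channel write */
    static int send_comm_cmd(int cmd, uint16_t param)
    {
        (void)cmd; (void)param;
        return 0;
    }

    static int publish_vhcr(uint64_t dma)
    {
        if (send_comm_cmd(CMD_VHCR0, (uint16_t)(dma >> 48))) /* bits 63..48 */
            return -1;
        if (send_comm_cmd(CMD_VHCR1, (uint16_t)(dma >> 32))) /* bits 47..32 */
            return -1;
        if (send_comm_cmd(CMD_VHCR2, (uint16_t)(dma >> 16))) /* bits 31..16 */
            return -1;
        return send_comm_cmd(CMD_VHCR_EN, (uint16_t)dma);    /* bits 15..0, enable */
    }
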
static int mlx4_init_hca(struct mlx4_dev *dev)
@@ -768,56 +1096,76 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
u64 icm_size;
int err;
- err = mlx4_QUERY_FW(dev);
- if (err) {
- if (err == -EACCES)
- mlx4_info(dev, "non-primary physical function, skipping.\n");
- else
- mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
- return err;
- }
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_QUERY_FW(dev);
+ if (err) {
+ if (err == -EACCES)
+ mlx4_info(dev, "non-primary physical function, skipping.\n");
+ else
+ mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ goto unmap_bf;
+ }
- err = mlx4_load_fw(dev);
- if (err) {
- mlx4_err(dev, "Failed to start FW, aborting.\n");
- return err;
- }
+ err = mlx4_load_fw(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to start FW, aborting.\n");
+ goto unmap_bf;
+ }
- mlx4_cfg.log_pg_sz_m = 1;
- mlx4_cfg.log_pg_sz = 0;
- err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
- if (err)
- mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+ mlx4_cfg.log_pg_sz_m = 1;
+ mlx4_cfg.log_pg_sz = 0;
+ err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+ if (err)
+ mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
- err = mlx4_dev_cap(dev, &dev_cap);
- if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
- goto err_stop_fw;
- }
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_stop_fw;
+ }
- profile = default_profile;
+ profile = default_profile;
- icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
- if ((long long) icm_size < 0) {
- err = icm_size;
- goto err_stop_fw;
- }
+ icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
+ &init_hca);
+ if ((long long) icm_size < 0) {
+ err = icm_size;
+ goto err_stop_fw;
+ }
- if (map_bf_area(dev))
- mlx4_dbg(dev, "Failed to map blue flame area\n");
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ init_hca.uar_page_sz = PAGE_SHIFT - 12;
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+ if (err)
+ goto err_stop_fw;
- err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
- if (err)
- goto err_stop_fw;
+ err = mlx4_INIT_HCA(dev, &init_hca);
+ if (err) {
+ mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ goto err_free_icm;
+ }
+ } else {
+ err = mlx4_init_slave(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize slave\n");
+ goto unmap_bf;
+ }
- err = mlx4_INIT_HCA(dev, &init_hca);
- if (err) {
- mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
- goto err_free_icm;
+ err = mlx4_slave_cap(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to obtain slave caps\n");
+ goto err_close;
+ }
}
+ if (map_bf_area(dev))
+ mlx4_dbg(dev, "Failed to map blue flame area\n");
+
+ /* Only the master sets the ports; all the rest get them from it. */
+ if (!mlx4_is_slave(dev))
+ mlx4_set_port_mask(dev);
+
err = mlx4_QUERY_ADAPTER(dev, &adapter);
if (err) {
mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
@@ -830,16 +1178,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
return 0;
err_close:
- mlx4_CLOSE_HCA(dev, 0);
+ mlx4_close_hca(dev);
err_free_icm:
- mlx4_free_icms(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_free_icms(dev);
err_stop_fw:
+ if (!mlx4_is_slave(dev)) {
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+ }
+unmap_bf:
unmap_bf_area(dev);
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
return err;
}
@@ -986,55 +1337,56 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- err = mlx4_init_mcg_table(dev);
- if (err) {
- mlx4_err(dev, "Failed to initialize "
- "multicast group table, aborting.\n");
- goto err_qp_table_free;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_mcg_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "multicast group table, aborting.\n");
+ goto err_qp_table_free;
+ }
}
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
- goto err_counters_table_free;
+ goto err_mcg_table_free;
}
- for (port = 1; port <= dev->caps.num_ports; port++) {
- enum mlx4_port_type port_type = 0;
- mlx4_SENSE_PORT(dev, port, &port_type);
- if (port_type)
- dev->caps.port_type[port] = port_type;
- ib_port_default_caps = 0;
- err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
- if (err)
- mlx4_warn(dev, "failed to get port %d default "
- "ib capabilities (%d). Continuing with "
- "caps = 0\n", port, err);
- dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
-
- err = mlx4_check_ext_port_caps(dev, port);
- if (err)
- mlx4_warn(dev, "failed to get port %d extended "
- "port capabilities support info (%d)."
- " Assuming not supported\n", port, err);
+ if (!mlx4_is_slave(dev)) {
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ ib_port_default_caps = 0;
+ err = mlx4_get_port_ib_caps(dev, port,
+ &ib_port_default_caps);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d default "
+ "ib capabilities (%d). Continuing "
+ "with caps = 0\n", port, err);
+ dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+
+ err = mlx4_check_ext_port_caps(dev, port);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d extended "
+ "port capabilities support info (%d)."
+ " Assuming not supported\n",
+ port, err);
- err = mlx4_SET_PORT(dev, port);
- if (err) {
- mlx4_err(dev, "Failed to set port %d, aborting\n",
- port);
- goto err_mcg_table_free;
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
+ goto err_counters_table_free;
+ }
}
}
- mlx4_set_port_mask(dev);
return 0;
-err_mcg_table_free:
- mlx4_cleanup_mcg_table(dev);
-
err_counters_table_free:
mlx4_cleanup_counters_table(dev);
+err_mcg_table_free:
+ mlx4_cleanup_mcg_table(dev);
+
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -1081,8 +1433,16 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int i;
if (msi_x) {
- nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
- nreq);
+ /* In multifunction mode each function gets 2 MSI-X vectors:
+ * one for data path completions and the other for async events
+ * or command completions */
+ if (mlx4_is_mfunc(dev)) {
+ nreq = 2;
+ } else {
+ nreq = min_t(int, dev->caps.num_eqs -
+ dev->caps.reserved_eqs, nreq);
+ }
+
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
goto no_msi;
@@ -1138,16 +1498,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev;
info->port = port;
- mlx4_init_mac_table(dev, &info->mac_table);
- mlx4_init_vlan_table(dev, &info->vlan_table);
- info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ if (!mlx4_is_slave(dev)) {
+ INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
+ mlx4_init_mac_table(dev, &info->mac_table);
+ mlx4_init_vlan_table(dev, &info->vlan_table);
+ info->base_qpn =
+ dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
(port - 1) * (1 << log_num_mac);
+ }
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
- info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (mlx4_is_mfunc(dev))
+ info->port_attr.attr.mode = S_IRUGO;
+ else {
+ info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ info->port_attr.store = set_port_type;
+ }
info->port_attr.show = show_port_type;
- info->port_attr.store = set_port_type;
sysfs_attr_init(&info->port_attr.attr);
err = device_create_file(&dev->pdev->dev, &info->port_attr);
@@ -1220,6 +1588,46 @@ static void mlx4_clear_steering(struct mlx4_dev *dev)
kfree(priv->steer);
}
+static int extended_func_num(struct pci_dev *pdev)
+{
+ return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
+}
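extended_func_num() flattens the PCI slot/function split of devfn into a single flat function index, which matters once SRIOV spills VFs past function 7 of a slot. A tiny worked example (the PCI_SLOT/PCI_FUNC macros are reproduced here for illustration):

    #include <stdio.h>

    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn) ((devfn) & 0x07)

    int main(void)
    {
        unsigned int devfn = (2 << 3) | 5; /* slot 2, function 5 */
        printf("extended func num: %u\n",
               PCI_SLOT(devfn) * 8 + PCI_FUNC(devfn)); /* prints 21 */
        return 0;
    }
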
+
+#define MLX4_OWNER_BASE 0x8069c
+#define MLX4_OWNER_SIZE 4
+
+static int mlx4_get_ownership(struct mlx4_dev *dev)
+{
+ void __iomem *owner;
+ u32 ret;
+
+ owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ MLX4_OWNER_SIZE);
+ if (!owner) {
+ mlx4_err(dev, "Failed to obtain ownership bit\n");
+ return -ENOMEM;
+ }
+
+ ret = readl(owner);
+ iounmap(owner);
+ return (int) !!ret;
+}
+
+static void mlx4_free_ownership(struct mlx4_dev *dev)
+{
+ void __iomem *owner;
+
+ owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ MLX4_OWNER_SIZE);
+ if (!owner) {
+ mlx4_err(dev, "Failed to obtain ownership bit\n");
+ return;
+ }
+ writel(0, owner);
+ msleep(1000);
+ iounmap(owner);
+}
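mlx4_get_ownership()/mlx4_free_ownership() above treat one word in BAR 0 as a device-wide semaphore: on this hardware the read itself claims the bit, a non-zero result means another function already owns the device, and writing zero releases it. A conceptual model with the MMIO replaced by a plain variable (a sketch only; the real code also sleeps a second after release to let firmware settle):

    #include <stdbool.h>

    static unsigned int owner_word; /* stands in for the mapped register */

    /* on the real device the read has a side effect: a zero result both
     * means "free" and atomically claims the semaphore for the reader */
    static bool try_get_ownership(void)
    {
        unsigned int seen = owner_word;
        if (seen == 0)
            owner_word = 1; /* modelling the hardware's claim-on-read */
        return seen == 0;
    }

    static void free_ownership(void)
    {
        owner_word = 0;
    }
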
+
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx4_priv *priv;
@@ -1235,13 +1643,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
"aborting.\n");
return err;
}
-
+ if (num_vfs > MLX4_MAX_NUM_VF) {
+ printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
+ num_vfs, MLX4_MAX_NUM_VF);
+ return -EINVAL;
+ }
/*
- * Check for BARs. We expect 0: 1MB
+ * Check for BARs.
*/
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) != 1 << 20) {
- dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+ if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+ !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Missing DCS, aborting."
+ "(id == 0X%p, id->driver_data: 0x%lx,"
+ " pci_resource_flags(pdev, 0):0x%lx)\n", id,
+ id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
@@ -1305,42 +1720,132 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&priv->bf_mutex);
dev->rev_id = pdev->revision;
+ /* Detect if this device is a virtual function */
+ if (id && id->driver_data & MLX4_VF) {
+ /* When acting as PF, we normally skip VFs unless explicitly
+ * requested to probe them. */
+ if (num_vfs && extended_func_num(pdev) > probe_vf) {
+ mlx4_warn(dev, "Skipping virtual function:%d\n",
+ extended_func_num(pdev));
+ err = -ENODEV;
+ goto err_free_dev;
+ }
+ mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
+ dev->flags |= MLX4_FLAG_SLAVE;
+ } else {
+ /* We reset the device and enable SRIOV only for physical
+ * devices. Try to claim ownership on the device;
+ * if already taken, skip -- do not allow multiple PFs */
+ err = mlx4_get_ownership(dev);
+ if (err) {
+ if (err < 0)
+ goto err_free_dev;
+ else {
+ mlx4_warn(dev, "Multiple PFs not yet supported."
+ " Skipping PF.\n");
+ err = -EINVAL;
+ goto err_free_dev;
+ }
+ }
- /*
- * Now reset the HCA before we touch the PCI capabilities or
- * attempt a firmware command, since a boot ROM may have left
- * the HCA in an undefined state.
- */
- err = mlx4_reset(dev);
- if (err) {
- mlx4_err(dev, "Failed to reset HCA, aborting.\n");
- goto err_free_dev;
+ if (num_vfs) {
+ mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ mlx4_err(dev, "Failed to enable sriov,"
+ "continuing without sriov enabled"
+ " (err = %d).\n", err);
+ num_vfs = 0;
+ err = 0;
+ } else {
+ mlx4_warn(dev, "Running in master mode\n");
+ dev->flags |= MLX4_FLAG_SRIOV |
+ MLX4_FLAG_MASTER;
+ dev->num_vfs = num_vfs;
+ }
+ }
+
+ /*
+ * Now reset the HCA before we touch the PCI capabilities or
+ * attempt a firmware command, since a boot ROM may have left
+ * the HCA in an undefined state.
+ */
+ err = mlx4_reset(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ goto err_rel_own;
+ }
}
+slave_start:
if (mlx4_cmd_init(dev)) {
mlx4_err(dev, "Failed to init command interface, aborting.\n");
- goto err_free_dev;
+ goto err_sriov;
+ }
+
+ /* In slave functions, the communication channel must be initialized
+ * before posting commands. Also, init num_slaves before calling
+ * mlx4_init_hca */
+ if (mlx4_is_mfunc(dev)) {
+ if (mlx4_is_master(dev))
+ dev->num_slaves = MLX4_MAX_NUM_SLAVES;
+ else {
+ dev->num_slaves = 0;
+ if (mlx4_multi_func_init(dev)) {
+ mlx4_err(dev, "Failed to init slave mfunc"
+ " interface, aborting.\n");
+ goto err_cmd;
+ }
+ }
}
err = mlx4_init_hca(dev);
- if (err)
- goto err_cmd;
+ if (err) {
+ if (err == -EACCES) {
+ /* Not primary Physical function
+ * Running in slave mode */
+ mlx4_cmd_cleanup(dev);
+ dev->flags |= MLX4_FLAG_SLAVE;
+ dev->flags &= ~MLX4_FLAG_MASTER;
+ goto slave_start;
+ } else
+ goto err_mfunc;
+ }
+
+ /* In master functions, the communication channel must be initialized
+ * after obtaining its address from fw */
+ if (mlx4_is_master(dev)) {
+ if (mlx4_multi_func_init(dev)) {
+ mlx4_err(dev, "Failed to init master mfunc"
+ "interface, aborting.\n");
+ goto err_close;
+ }
+ }
err = mlx4_alloc_eq_table(dev);
if (err)
- goto err_close;
+ goto err_master_mfunc;
priv->msix_ctl.pool_bm = 0;
- spin_lock_init(&priv->msix_ctl.pool_lock);
+ mutex_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
-
- err = mlx4_init_steering(dev);
- if (err)
+ if ((mlx4_is_mfunc(dev)) &&
+ !(dev->flags & MLX4_FLAG_MSI_X)) {
+ mlx4_err(dev, "INTx is not supported in multi-function mode."
+ " aborting.\n");
goto err_free_eq;
+ }
+
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_steering(dev);
+ if (err)
+ goto err_free_eq;
+ }
err = mlx4_setup_hca(dev);
- if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+ if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
+ !mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
@@ -1383,20 +1888,37 @@ err_port:
mlx4_cleanup_uar_table(dev);
err_steer:
- mlx4_clear_steering(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
err_free_eq:
mlx4_free_eq_table(dev);
+err_master_mfunc:
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
+
err_close:
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
mlx4_close_hca(dev);
+err_mfunc:
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
+
err_cmd:
mlx4_cmd_cleanup(dev);
+err_sriov:
+ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
+ pci_disable_sriov(pdev);
+
+err_rel_own:
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
+
err_free_dev:
kfree(priv);
@@ -1424,6 +1946,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
int p;
if (dev) {
+ /* in SRIOV it is not allowed to unload the PF's
+ * driver while there are live VFs */
+ if (mlx4_is_master(dev)) {
+ if (mlx4_how_many_lives_vf(dev))
+ printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+ }
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
@@ -1443,17 +1971,31 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_free_resource_tracker(dev);
+
iounmap(priv->kar);
mlx4_uar_free(dev, &priv->driver_uar);
mlx4_cleanup_uar_table(dev);
- mlx4_clear_steering(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
mlx4_free_eq_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
mlx4_close_hca(dev);
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
mlx4_cmd_cleanup(dev);
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
+ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+ mlx4_warn(dev, "Disabling sriov\n");
+ pci_disable_sriov(pdev);
+ }
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
kfree(priv);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -1468,33 +2010,48 @@ int mlx4_restart_one(struct pci_dev *pdev)
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
- { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
- { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
- { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
- { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
- { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
- { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
- { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
- { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
- { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
- { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
- { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
- { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
- { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+ /* MT25408 "Hermon" SDR */
+ { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+ /* MT25408 "Hermon" DDR */
+ { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+ /* MT25408 "Hermon" QDR */
+ { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+ /* MT25408 "Hermon" DDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+ /* MT25408 "Hermon" QDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+ /* MT25408 "Hermon" EN 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+ /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+ /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+ /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+ /* MT26468 ConnectX EN 10GigE PCIe gen2*/
+ { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+ /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+ { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+ /* MT26478 ConnectX2 40GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+ /* MT25400 Family [ConnectX-2 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+ /* MT27500 Family [ConnectX-3] */
+ { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+ /* MT27500 Family [ConnectX-3 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
{ 0, }
};
@@ -1523,6 +2080,12 @@ static int __init mlx4_verify_params(void)
return -1;
}
+ /* Check that the port-type module parameters form a legal combination */
+ if (port_type_array[0] == false && port_type_array[1] == true) {
+ printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+ port_type_array[0] = true;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 978688c3104..ca574d850b3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -44,28 +44,47 @@
static const u8 zero_gid[16]; /* automatically initialized to 0 */
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_MAX_QP_PER_MGM];
+};
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
+{
+ return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+}
+
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
+{
+ return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
+}
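These two helpers explain the module parameter's "10 gives 248" example: a log size of 10 means a 1024-byte MGM entry, the first two 16-byte rows hold the header and GID (see struct mlx4_mgm above), and every remaining row carries four 4-byte QPNs. A worked check (the 4 KB cap is an assumption):

    #include <stdio.h>

    #define MAX_MGM_ENTRY_SIZE (1 << 12) /* assumed cap */

    static int mgm_entry_size(int log_sz)
    {
        int sz = 1 << log_sz;
        return sz < MAX_MGM_ENTRY_SIZE ? sz : MAX_MGM_ENTRY_SIZE;
    }

    static int qp_per_mgm(int log_sz)
    {
        /* two 16-byte rows of header/GID, then 4 QPNs per row */
        return 4 * (mgm_entry_size(log_sz) / 16 - 2);
    }

    int main(void)
    {
        printf("%d\n", qp_per_mgm(10)); /* 4 * (1024/16 - 2) = 248 */
        return 0;
    }
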
+
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
-static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
struct mlx4_cmd_mailbox *mailbox)
{
u32 in_mod;
- in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+ in_mod = (u32) port << 16 | steer << 1;
return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
- MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -75,7 +94,8 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int err;
err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
- MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
if (!err)
*hash = imm;
@@ -102,7 +122,7 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
* Add new entry to steering data structure.
* All promisc QPs should be added as well
*/
-static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int new_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
@@ -115,10 +135,8 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
struct mlx4_promisc_qp *dqp = NULL;
u32 prot;
int err;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
@@ -130,7 +148,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* If the given qpn is also a promisc qp,
* it should be inserted to duplicates list
*/
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (pqp) {
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
if (!dqp) {
@@ -165,7 +183,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* don't add already existing qpn */
if (pqp->qpn == qpn)
continue;
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
/* out of space */
err = -ENOMEM;
goto out_mailbox;
@@ -193,7 +211,7 @@ out_alloc:
}
/* update the data structures with existing steering entry */
-static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
@@ -201,12 +219,10 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (!pqp)
return 0; /* nothing to do */
@@ -225,7 +241,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
* we need to add it as a duplicate to this entry
* for future references */
list_for_each_entry(dqp, &entry->duplicates, list) {
if (qpn == dqp->qpn)
return 0; /* qp is already duplicated */
}
@@ -241,20 +257,18 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* Check whether a qpn is a duplicate on steering entry
* If so, it should not be removed from mgm */
-static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
struct mlx4_steer *s_steer;
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
/* if qp is not promisc, it cannot be duplicated */
- if (!get_promisc_qp(dev, pf_num, steer, qpn))
+ if (!get_promisc_qp(dev, 0, steer, qpn))
return false;
/* The qp is promisc qp so it is a duplicate on this index
@@ -279,7 +293,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
}
/* If a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 tqpn)
{
@@ -291,10 +305,8 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
u32 members_count;
bool ret = false;
int i;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -306,7 +318,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (i = 0; i < members_count; i++) {
qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
- if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+ if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
/* the qp is not promisc, the entry can't be removed */
goto out;
}
@@ -332,7 +344,7 @@ out:
return ret;
}
-static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_steer *s_steer;
@@ -347,14 +359,13 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
bool found;
int last_index;
int err;
- u8 pf_num;
struct mlx4_priv *priv = mlx4_priv(dev);
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
- if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+ if (get_promisc_qp(dev, 0, steer, qpn)) {
err = 0; /* Nothing to do, already exists */
goto out_mutex;
}
@@ -397,7 +408,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
}
if (!found) {
/* Need to add the qpn to mgm */
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
/* entry is full */
err = -ENOMEM;
goto out_mailbox;
@@ -420,7 +431,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
- err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_list;
@@ -439,7 +450,7 @@ out_mutex:
return err;
}
-static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -454,13 +465,11 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
bool back_to_list = false;
int loc, i;
int err;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (unlikely(!pqp)) {
mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
/* nothing to do */
@@ -479,12 +488,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
goto out_list;
}
mgm = mailbox->buf;
+ memset(mgm, 0, sizeof *mgm);
members_count = 0;
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
- err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_mailbox;
@@ -649,12 +659,13 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
}
index += dev->caps.num_mgms;
+ new_entry = 1;
memset(mgm, 0, sizeof *mgm);
memcpy(mgm->gid, gid, 16);
}
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
mlx4_err(dev, "MGM at index %x is full.\n", index);
err = -ENOMEM;
goto out;
@@ -696,9 +707,9 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
- new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ new_steering_entry(dev, port, steer, index, qp->qpn);
else
- existing_steering_entry(dev, 0, port, steer,
+ existing_steering_entry(dev, port, steer,
index, qp->qpn);
}
if (err && link && index != -1) {
@@ -749,7 +760,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
/* if this qp is also a promisc qp, it shouldn't be removed */
if (prot == MLX4_PROT_ETH &&
- check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+ check_duplicate_entry(dev, port, steer, index, qp->qpn))
goto out;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
@@ -769,7 +780,8 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mgm->qp[i - 1] = 0;
if (prot == MLX4_PROT_ETH)
- removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ removed_entry = can_remove_steering_entry(dev, port, steer,
+ index, qp->qpn);
if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
err = mlx4_WRITE_ENTRY(dev, index, mailbox);
goto out;
@@ -828,6 +840,34 @@ out:
return err;
}
+static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], u8 attach, u8 block_loopback,
+ enum mlx4_protocol prot)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err = 0;
+ int qpn;
+
+ if (!mlx4_is_mfunc(dev))
+ return -EBADF;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, gid, 16);
+ qpn = qp->qpn;
+ qpn |= (prot << 28);
+ if (attach && block_loopback)
+ qpn |= (1 << 31);
+
+ err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
+ MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
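For reference, mlx4_QP_ATTACH packs its immediate operand as: QP number in the low 24 bits, protocol in bits 28-30, and the block-loopback flag in bit 31, with the GID travelling in the mailbox. A standalone round-trip sketch of that packing (plain C with fixed-width types; the wrapper that actually parses this lives in resource_tracker.c and is not part of this hunk):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors: qpn |= (prot << 28); if (attach && block_loopback) qpn |= 1 << 31; */
    int main(void)
    {
            uint32_t qpn = 0x00abcd, prot = 1 /* MLX4_PROT_ETH */, block_lb = 1;
            uint32_t packed = qpn | (prot << 28) | (block_lb << 31);

            assert((packed & 0xffffff) == qpn);      /* low 24 bits: QP number */
            assert(((packed >> 28) & 0x7) == prot);  /* bits 28..30: protocol */
            assert((packed >> 31) == block_lb);      /* bit 31: block loopback */
            return 0;
    }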
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
@@ -843,9 +883,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
- return mlx4_qp_attach_common(dev, qp, gid,
- block_mcast_loopback, prot,
- steer);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+
+ return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+ prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
@@ -860,22 +903,90 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
- if (prot == MLX4_PROT_ETH) {
+ if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
- }
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+int mlx4_unicast_attach(struct mlx4_dev *dev,
+ struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot)
+{
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_UC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+
+ return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+ prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
+
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], enum mlx4_protocol prot)
+{
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_UC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u32 qpn = (u32) vhcr->in_param & 0xffffffff;
+ u8 port = vhcr->in_param >> 62;
+ enum mlx4_steer_type steer = vhcr->in_modifier;
+
+ /* Promiscuous unicast is not allowed in mfunc */
+ if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
+ return 0;
+
+ if (vhcr->op_modifier)
+ return add_promisc_qp(dev, port, steer, qpn);
+ else
+ return remove_promisc_qp(dev, port, steer, qpn);
+}
+
+static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
+ enum mlx4_steer_type steer, u8 add, u8 port)
+{
+ return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
+ MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+}
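mlx4_PROMISC and mlx4_PROMISC_wrapper above agree on a single layout: the 64-bit in_param carries the QP number in its low 32 bits and the port in its top two bits, the in_modifier carries the steering type, and op_modifier selects add versus remove. A standalone round-trip check of the in_param packing (plain C; the values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t qpn = 0x00c0ffee;
            uint8_t port = 2;
            uint64_t in_param = (uint64_t) qpn | (uint64_t) port << 62;

            /* wrapper side: qpn = in_param & 0xffffffff, port = in_param >> 62 */
            assert((uint32_t) (in_param & 0xffffffff) == qpn);
            assert((uint8_t) (in_param >> 62) == port);
            return 0;
    }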
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
- return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+ return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
@@ -884,27 +995,34 @@ int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
- return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+ return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
- return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+ return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
return 0;
- return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
+
+ return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 5dfa68ffc11..28f8251561f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -46,21 +46,25 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
#define DRV_NAME "mlx4_core"
-#define DRV_VERSION "1.0"
-#define DRV_RELDATE "July 14, 2011"
+#define PFX DRV_NAME ": "
+#define DRV_VERSION "1.1"
+#define DRV_RELDATE "Dec, 2011"
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
- MLX4_CLR_INT_SIZE = 0x00008
+ MLX4_CLR_INT_SIZE = 0x00008,
+ MLX4_SLAVE_COMM_BASE = 0x0,
+ MLX4_COMM_PAGESIZE = 0x1000
};
enum {
- MLX4_MGM_ENTRY_SIZE = 0x100,
- MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
- MLX4_MTT_ENTRY_PER_SEG = 8
+ MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
+ MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+ MLX4_MTT_ENTRY_PER_SEG = 8,
};
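For scale: the new 4 KB ceiling lets one MGM entry hold up to 4 * (0x1000/16 - 2) = 1016 QPs, where the old fixed MLX4_MGM_ENTRY_SIZE of 0x100 allowed only 4 * (0x100/16 - 2) = 56; the per-device value is now read at runtime as dev->caps.num_qp_per_mgm.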
enum {
@@ -80,6 +84,94 @@ enum {
MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};
+enum mlx4_mr_state {
+ MLX4_MR_DISABLED = 0,
+ MLX4_MR_EN_HW,
+ MLX4_MR_EN_SW
+};
+
+#define MLX4_COMM_TIME 10000
+enum {
+ MLX4_COMM_CMD_RESET,
+ MLX4_COMM_CMD_VHCR0,
+ MLX4_COMM_CMD_VHCR1,
+ MLX4_COMM_CMD_VHCR2,
+ MLX4_COMM_CMD_VHCR_EN,
+ MLX4_COMM_CMD_VHCR_POST,
+ MLX4_COMM_CMD_FLR = 254
+};
+
+/* The flag indicates that the slave should delay the RESET cmd */
+#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
+/* Indicates how many retries will be done if we are in the middle of FLR */
+#define NUM_OF_RESET_RETRIES 10
+#define SLEEP_TIME_IN_RESET (2 * 1000)
+enum mlx4_resource {
+ RES_QP,
+ RES_CQ,
+ RES_SRQ,
+ RES_XRCD,
+ RES_MPT,
+ RES_MTT,
+ RES_MAC,
+ RES_VLAN,
+ RES_EQ,
+ RES_COUNTER,
+ MLX4_NUM_OF_RESOURCE_TYPE
+};
+
+enum mlx4_alloc_mode {
+ RES_OP_RESERVE,
+ RES_OP_RESERVE_AND_MAP,
+ RES_OP_MAP_ICM,
+};
+
+
+/*
+ * Virtual HCR structures.
+ * mlx4_vhcr is the sw representation, in machine endianness.
+ *
+ * mlx4_vhcr_cmd is the formalized structure, the one that is passed
+ * to FW through the communication channel.
+ * It is big endian, and has the same layout as the physical HCR
+ * used by the command interface.
+ */
+struct mlx4_vhcr {
+ u64 in_param;
+ u64 out_param;
+ u32 in_modifier;
+ u32 errno;
+ u16 op;
+ u16 token;
+ u8 op_modifier;
+ u8 e_bit;
+};
+
+struct mlx4_vhcr_cmd {
+ __be64 in_param;
+ __be32 in_modifier;
+ __be64 out_param;
+ __be16 token;
+ u16 reserved;
+ u8 status;
+ u8 flags;
+ __be16 opcode;
+};
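Per the comment above, the master has to translate a slave's big-endian mlx4_vhcr_cmd into a host-order mlx4_vhcr before dispatching it. A minimal conversion sketch under that assumption; the helper name is illustrative, and the placement of op_modifier and the e_bit inside the flags byte is an assumption, not taken from this patch:

    /* Illustrative wire-to-host conversion (hypothetical helper). */
    static void mlx4_vhcr_from_cmd(struct mlx4_vhcr *host,
                                   const struct mlx4_vhcr_cmd *wire)
    {
            host->in_param    = be64_to_cpu(wire->in_param);
            host->out_param   = be64_to_cpu(wire->out_param);
            host->in_modifier = be32_to_cpu(wire->in_modifier);
            host->token       = be16_to_cpu(wire->token);
            host->op          = be16_to_cpu(wire->opcode);
            host->op_modifier = wire->flags & 0x7;      /* assumed flag layout */
            host->e_bit       = !!(wire->flags & 0x40); /* assumed event bit */
    }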
+
+struct mlx4_cmd_info {
+ u16 opcode;
+ bool has_inbox;
+ bool has_outbox;
+ bool out_is_imm;
+ bool encode_slave_id;
+ int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox);
+ int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+};
+
#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
#else /* CONFIG_MLX4_DEBUG */
@@ -99,6 +191,12 @@ do { \
#define mlx4_warn(mdev, format, arg...) \
dev_warn(&mdev->pdev->dev, format, ##arg)
+extern int mlx4_log_num_mgm_entry_size;
+extern int log_mtts_per_seg;
+
+#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define ALL_SLAVES 0xff
+
struct mlx4_bitmap {
u32 last;
u32 top;
@@ -130,6 +228,147 @@ struct mlx4_icm_table {
struct mlx4_icm **icm;
};
+/*
+ * Must be packed because mtt_addr is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+ __be32 flags;
+ __be32 qpn;
+ __be32 key;
+ __be32 pd_flags;
+ __be64 start;
+ __be64 length;
+ __be32 lkey;
+ __be32 win_cnt;
+ u8 reserved1[3];
+ u8 mtt_rep;
+ __be64 mtt_addr;
+ __be32 mtt_sz;
+ __be32 entity_size;
+ __be32 first_byte_offset;
+} __packed;
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ u8 log_eq_size;
+ u8 reserved2[4];
+ u8 eq_period;
+ u8 reserved3;
+ u8 eq_max_count;
+ u8 reserved4[3];
+ u8 intr;
+ u8 log_page_size;
+ u8 reserved5[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ u32 reserved6[2];
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved7[4];
+};
+
+struct mlx4_cq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ __be32 logsize_usrpage;
+ __be16 cq_period;
+ __be16 cq_max_count;
+ u8 reserved2[3];
+ u8 comp_eqn;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved4[2];
+ __be64 db_rec_addr;
+};
+
+struct mlx4_srq_context {
+ __be32 state_logsize_srqn;
+ u8 logstride;
+ u8 reserved1;
+ __be16 xrcd;
+ __be32 pg_offset_cqn;
+ u32 reserved2;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 pd;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
+ u16 reserved4;
+ __be16 wqe_counter;
+ u32 reserved5;
+ __be64 db_rec_addr;
+};
+
+struct mlx4_eqe {
+ u8 reserved1;
+ u8 type;
+ u8 reserved2;
+ u8 subtype;
+ union {
+ u32 raw[6];
+ struct {
+ __be32 cqn;
+ } __packed comp;
+ struct {
+ u16 reserved1;
+ __be16 token;
+ u32 reserved2;
+ u8 reserved3[3];
+ u8 status;
+ __be64 out_param;
+ } __packed cmd;
+ struct {
+ __be32 qpn;
+ } __packed qp;
+ struct {
+ __be32 srqn;
+ } __packed srq;
+ struct {
+ __be32 cqn;
+ u32 reserved1;
+ u8 reserved2[3];
+ u8 syndrome;
+ } __packed cq_err;
+ struct {
+ u32 reserved1[2];
+ __be32 port;
+ } __packed port_change;
+ struct {
+ #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
+ u32 reserved;
+ u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+ } __packed comm_channel_arm;
+ struct {
+ u8 port;
+ u8 reserved[3];
+ __be64 mac;
+ } __packed mac_update;
+ struct {
+ u8 port;
+ } __packed sw_event;
+ struct {
+ __be32 slave_id;
+ } __packed flr_event;
+ } event;
+ u8 slave_id;
+ u8 reserved3[2];
+ u8 owner;
+} __packed;
+
struct mlx4_eq {
struct mlx4_dev *dev;
void __iomem *doorbell;
@@ -142,6 +381,17 @@ struct mlx4_eq {
struct mlx4_mtt mtt;
};
+struct mlx4_slave_eqe {
+ u8 type;
+ u8 port;
+ u32 param;
+};
+
+struct mlx4_slave_event_eq_info {
+ int eqn;
+ u16 token;
+};
+
struct mlx4_profile {
int num_qp;
int rdmarc_per_qp;
@@ -155,16 +405,37 @@ struct mlx4_profile {
struct mlx4_fw {
u64 clr_int_base;
u64 catas_offset;
+ u64 comm_base;
struct mlx4_icm *fw_icm;
struct mlx4_icm *aux_icm;
u32 catas_size;
u16 fw_pages;
u8 clr_int_bar;
u8 catas_bar;
+ u8 comm_bar;
};
-#define MGM_QPN_MASK 0x00FFFFFF
-#define MGM_BLCK_LB_BIT 30
+struct mlx4_comm {
+ u32 slave_write;
+ u32 slave_read;
+};
+
+enum {
+ MLX4_MCAST_CONFIG = 0,
+ MLX4_MCAST_DISABLE = 1,
+ MLX4_MCAST_ENABLE = 2,
+};
+
+#define VLAN_FLTR_SIZE 128
+
+struct mlx4_vlan_fltr {
+ __be32 entry[VLAN_FLTR_SIZE];
+};
+
+struct mlx4_mcast_entry {
+ struct list_head list;
+ u64 addr;
+};
struct mlx4_promisc_qp {
struct list_head list;
@@ -177,19 +448,90 @@ struct mlx4_steer_index {
struct list_head duplicates;
};
-struct mlx4_mgm {
- __be32 next_gid_index;
- __be32 members_count;
- u32 reserved[2];
- u8 gid[16];
- __be32 qp[MLX4_QP_PER_MGM];
+#define MLX4_EVENT_TYPES_NUM 64
+
+struct mlx4_slave_state {
+ u8 comm_toggle;
+ u8 last_cmd;
+ u8 init_port_mask;
+ bool active;
+ u8 function;
+ dma_addr_t vhcr_dma;
+ u16 mtu[MLX4_MAX_PORTS + 1];
+ __be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
+ struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
+ struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
+ struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
+ /* event type to eq number lookup */
+ struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM];
+ u16 eq_pi;
+ u16 eq_ci;
+ spinlock_t lock;
+ /* zero-initialized via kzalloc */
+ u8 is_slave_going_down;
+ u32 cookie;
+};
+
+struct slave_list {
+ struct mutex mutex;
+ struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
+};
+
+struct mlx4_resource_tracker {
+ spinlock_t lock;
+ /* one radix tree per resource type */
+ struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+ /* array of resource lists, one per slave */
+ struct slave_list *slave_list;
+};
+
+#define SLAVE_EVENT_EQ_SIZE 128
+struct mlx4_slave_event_eq {
+ u32 eqn;
+ u32 cons;
+ u32 prod;
+ struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
+};
+
+struct mlx4_master_qp0_state {
+ int proxy_qp0_active;
+ int qp0_active;
+ int port_active;
+};
+
+struct mlx4_mfunc_master_ctx {
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
+ int init_port_ref[MLX4_MAX_PORTS + 1];
+ u16 max_mtu[MLX4_MAX_PORTS + 1];
+ int disable_mcast_ref[MLX4_MAX_PORTS + 1];
+ struct mlx4_resource_tracker res_tracker;
+ struct workqueue_struct *comm_wq;
+ struct work_struct comm_work;
+ struct work_struct slave_event_work;
+ struct work_struct slave_flr_event_work;
+ spinlock_t slave_state_lock;
+ __be32 comm_arm_bit_vector[4];
+ struct mlx4_eqe cmd_eqe;
+ struct mlx4_slave_event_eq slave_eq;
+ struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
+};
+
+struct mlx4_mfunc {
+ struct mlx4_comm __iomem *comm;
+ struct mlx4_vhcr_cmd *vhcr;
+ dma_addr_t vhcr_dma;
+
+ struct mlx4_mfunc_master_ctx master;
};
+
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
+ struct semaphore slave_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
@@ -197,6 +539,7 @@ struct mlx4_cmd {
u16 token_mask;
u8 use_events;
u8 toggle;
+ u8 comm_toggle;
};
struct mlx4_uar_table {
@@ -287,6 +630,48 @@ struct mlx4_vlan_table {
int max;
};
+#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_MC_PROMISC_SHIFT 30
+
+enum {
+ MCAST_DIRECT_ONLY = 0,
+ MCAST_DIRECT = 1,
+ MCAST_DEFAULT = 2
+};
+
+
+struct mlx4_set_port_general_context {
+ u8 reserved[3];
+ u8 flags;
+ u16 reserved2;
+ __be16 mtu;
+ u8 pptx;
+ u8 pfctx;
+ u16 reserved3;
+ u8 pprx;
+ u8 pfcrx;
+ u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+ __be32 base_qpn;
+ u8 reserved;
+ u8 n_mac;
+ u8 n_vlan;
+ u8 n_prio;
+ u8 reserved2[3];
+ u8 mac_miss;
+ u8 intra_no_vlan;
+ u8 no_vlan;
+ u8 intra_vlan_miss;
+ u8 vlan_miss;
+ u8 reserved3[3];
+ u8 no_vlan_prio;
+ __be32 promisc;
+ __be32 mcast;
+};
+
struct mlx4_mac_entry {
u64 mac;
};
@@ -312,7 +697,7 @@ struct mlx4_sense {
struct mlx4_msix_ctl {
u64 pool_bm;
- spinlock_t pool_lock;
+ struct mutex pool_lock;
};
struct mlx4_steer {
@@ -333,6 +718,7 @@ struct mlx4_priv {
struct mlx4_fw fw;
struct mlx4_cmd cmd;
+ struct mlx4_mfunc mfunc;
struct mlx4_bitmap pd_bitmap;
struct mlx4_bitmap xrcd_bitmap;
@@ -359,6 +745,7 @@ struct mlx4_priv {
struct list_head bf_list;
struct mutex bf_mutex;
struct io_mapping *bf_mapping;
+ int reserved_mtts;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -403,6 +790,62 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int __mlx4_mr_reserve(struct mlx4_dev *dev);
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base);
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list);
void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -419,13 +862,113 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_profile *request,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca);
+void mlx4_master_comm_channel(struct work_struct *work);
+void mlx4_gen_slave_eqe(struct work_struct *work);
+void mlx4_master_handle_slave_flr(struct work_struct *work);
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+int mlx4_multi_func_init(struct mlx4_dev *dev);
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout);
+
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
@@ -452,12 +995,113 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+/* resource tracker functions */
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+ enum mlx4_resource resource_type,
+ int resource_id, int *slave);
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+int mlx4_init_resource_tracker(struct mlx4_dev *dev);
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev);
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot, enum mlx4_steer_type steer);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer);
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
+ int port, void *buf);
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
+ struct mlx4_cmd_mailbox *outbox);
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
+
+static inline void set_param_l(u64 *arg, u32 val)
+{
+ *arg = (*arg & 0xffffffff00000000ULL) | val;
+}
+
+static inline void set_param_h(u64 *arg, u32 val)
+{
+ *arg = (*arg & 0xffffffff) | ((u64) val << 32);
+}
+
+static inline u32 get_param_l(u64 *arg)
+{
+ return (u32) (*arg & 0xffffffff);
+}
+
+static inline u32 get_param_h(u64 *arg)
+{
+ return (u32)(*arg >> 32);
+}
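These four helpers pack and unpack the two 32-bit halves of the 64-bit immediate that travels with wrapped commands; mr.c, for example, sends an MTT offset in the low half and the allocation order in the high half. A standalone sketch of the same convention, written so it preserves the untouched half in both directions (plain C with fixed-width types standing in for the kernel's):

    #include <assert.h>
    #include <stdint.h>

    static void set_low(uint64_t *arg, uint32_t val)
    {
            *arg = (*arg & 0xffffffff00000000ULL) | val;
    }

    static void set_high(uint64_t *arg, uint32_t val)
    {
            *arg = (*arg & 0xffffffffULL) | ((uint64_t) val << 32);
    }

    int main(void)
    {
            uint64_t in_param = 0;

            set_low(&in_param, 4096);   /* e.g. MTT offset */
            set_high(&in_param, 3);     /* e.g. allocation order */
            assert(in_param == (((uint64_t) 3 << 32) | 4096));
            return 0;
    }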
+
+static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
+{
+ return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
+}
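The tracker guards every per-type radix tree with the single spinlock returned by mlx4_tlock(). A hedged sketch of the kind of lookup this enables; struct res_common and its fields are assumptions for illustration only, the real accessors live in resource_tracker.c:

    /* Hypothetical tracker lookup; res_common is an illustrative type. */
    struct res_common {
            struct list_head list;
            u32 res_id;
            int owner;
    };

    static int res_owner(struct mlx4_dev *dev, enum mlx4_resource type, u32 id)
    {
            struct mlx4_resource_tracker *tracker =
                    &mlx4_priv(dev)->mfunc.master.res_tracker;
            struct res_common *r;
            int owner = -1;

            spin_lock_irq(mlx4_tlock(dev));
            r = radix_tree_lookup(&tracker->res_tree[type], id);
            if (r)
                    owner = r->owner;
            spin_unlock_irq(mlx4_tlock(dev));
            return owner;
    }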
+
+#define NOT_MASKED_PD_BITS 17
+
#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 207b5add3ca..d60335f3c47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -51,8 +51,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.5.4.2"
-#define DRV_RELDATE "October 2011"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "Dec 2011"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -325,11 +325,11 @@ struct mlx4_en_port_profile {
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
+ int rss_rings;
};
struct mlx4_en_profile {
int rss_xor;
- int tcp_rss;
int udp_rss;
u8 rss_mask;
u32 active_ports;
@@ -366,16 +366,6 @@ struct mlx4_en_rss_map {
enum mlx4_qp_state indir_state;
};
-struct mlx4_en_rss_context {
- __be32 base_qpn;
- __be32 default_qpn;
- u16 reserved;
- u8 hash_fn;
- u8 flags;
- __be32 rss_key[10];
- __be32 base_qpn_udp;
-};
-
struct mlx4_en_port_state {
int link_state;
int link_speed;
@@ -463,6 +453,7 @@ struct mlx4_en_priv {
int base_qpn;
struct mlx4_en_rss_map rss_map;
+ u32 ctrl_flags;
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
#define MLX4_EN_FLAG_MC_PROMISC 0x2
@@ -485,6 +476,7 @@ struct mlx4_en_priv {
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
+ u64 stats_bitmap;
char *mc_addrs;
int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;
@@ -495,9 +487,9 @@ struct mlx4_en_priv {
enum mlx4_en_wol {
MLX4_EN_WOL_MAGIC = (1ULL << 61),
MLX4_EN_WOL_ENABLED = (1ULL << 62),
- MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
};
+#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -536,7 +528,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
u32 size, u16 stride);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring);
+ struct mlx4_en_rx_ring *ring,
+ u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index efa3e77355e..25a80d71fb2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,35 +32,17 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/kernel.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
- __be32 flags;
- __be32 qpn;
- __be32 key;
- __be32 pd_flags;
- __be64 start;
- __be64 length;
- __be32 lkey;
- __be32 win_cnt;
- u8 reserved1[3];
- u8 mtt_rep;
- __be64 mtt_seg;
- __be32 mtt_sz;
- __be32 entity_size;
- __be32 first_byte_offset;
-} __packed;
-
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO (1 << 17)
@@ -180,22 +162,48 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
kfree(buddy->num_free);
}
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
u32 seg;
+ int seg_order;
+ u32 offset;
- seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+ seg_order = max_t(int, order - log_mtts_per_seg, 0);
+
+ seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
if (seg == -1)
return -1;
- if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
- seg + (1 << order) - 1)) {
- mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+ offset = seg * (1 << log_mtts_per_seg);
+
+ if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
+ offset + (1 << order) - 1)) {
+ mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
return -1;
}
- return seg;
+ return offset;
+}
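The buddy allocator now deals in segments of 2^log_mtts_per_seg MTT entries, so a request for 2^order entries is rounded up to seg_order segments and the result converted back to an entry offset. A worked standalone example, assuming the driver's default log_mtts_per_seg of 3 (eight MTTs per segment):

    #include <assert.h>

    int main(void)
    {
            int log_mtts_per_seg = 3;           /* assumed default module param */
            int order = 5;                      /* want 2^5 = 32 MTT entries */
            int seg_order = order - log_mtts_per_seg;

            if (seg_order < 0)                  /* small requests still take */
                    seg_order = 0;              /* one whole segment */
            assert(seg_order == 2);             /* 2^2 = 4 segments of 8 MTTs */

            int seg = 10;                       /* pretend buddy result */
            int offset = seg * (1 << log_mtts_per_seg);
            assert(offset == 80);               /* first MTT entry index */
            return 0;
    }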
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, order);
+ err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ return -1;
+ return get_param_l(&out_param);
+ }
+ return __mlx4_alloc_mtt_range(dev, order);
}
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
@@ -210,33 +218,63 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
} else
mtt->page_shift = page_shift;
- for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+ for (mtt->order = 0, i = 1; i < npages; i <<= 1)
++mtt->order;
- mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
- if (mtt->first_seg == -1)
+ mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
+ if (mtt->offset == -1)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
+ u32 first_seg;
+ int seg_order;
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ seg_order = max_t(int, order - log_mtts_per_seg, 0);
+ first_seg = offset / (1 << log_mtts_per_seg);
+
+ mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
+ mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
+ offset + (1 << order) - 1);
+}
+
+static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, offset);
+ set_param_h(&in_param, order);
+ err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed to free mtt range at:"
+ "%d order:%d\n", offset, order);
+ return;
+ }
+ __mlx4_free_mtt_range(dev, offset, order);
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
if (mtt->order < 0)
return;
- mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
- mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
+ mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
- return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+ return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
@@ -253,69 +291,205 @@ static u32 key_to_hw_index(u32 key)
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
- return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
- MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, mailbox->dma, mpt_index,
+ 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
- !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+ !mailbox, MLX4_CMD_HW2SW_MPT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
-int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
- int npages, int page_shift, struct mlx4_mr *mr)
+int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ u32 *base_mridx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u32 index;
- int err;
+ u32 mridx;
- index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
- if (index == -1)
+ mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
+ if (mridx == -1)
return -ENOMEM;
+ *base_mridx = mridx;
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
+
+void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
+
+int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+ u64 iova, u64 size, u32 access, int npages,
+ int page_shift, struct mlx4_mr *mr)
+{
mr->iova = iova;
mr->size = size;
mr->pd = pd;
mr->access = access;
- mr->enabled = 0;
- mr->key = hw_index_to_key(index);
+ mr->enabled = MLX4_MR_DISABLED;
+ mr->key = hw_index_to_key(mridx);
+
+ return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox,
+ int num_entries)
+{
+ return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+}
- err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+int __mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
+
+static int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+ u64 out_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ return -1;
+ return get_param_l(&out_param);
+ }
+ return __mlx4_mr_reserve(dev);
+}
+
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, index);
+ if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to release mr index:%d\n",
+ index);
+ return;
+ }
+ __mlx4_mr_release(dev, index);
+}
+
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+ u64 param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&param, index);
+ return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ }
+ return __mlx4_mr_alloc_icm(dev, index);
+}
+
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
+static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, index);
+ if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
+ index);
+ return;
+ }
+ return __mlx4_mr_free_icm(dev, index);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ int npages, int page_shift, struct mlx4_mr *mr)
+{
+ u32 index;
+ int err;
+
+ index = mlx4_mr_reserve(dev);
+ if (index == -1)
+ return -ENOMEM;
+
+ err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
+ access, npages, page_shift, mr);
if (err)
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ mlx4_mr_release(dev, index);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_priv *priv = mlx4_priv(dev);
int err;
- if (mr->enabled) {
+ if (mr->enabled == MLX4_MR_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err)
- mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
- }
+ mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+ mr->enabled = MLX4_MR_EN_SW;
+ }
mlx4_mtt_cleanup(dev, &mr->mtt);
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+ mlx4_mr_free_reserved(dev, mr);
+ if (mr->enabled)
+ mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+ mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
@@ -340,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
- mpt_entry->mtt_seg = 0;
+ mpt_entry->mtt_addr = 0;
} else {
- mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+ mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
+ &mr->mtt));
}
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
@@ -350,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
MLX4_MPT_PD_FLAG_RAE);
- mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
- dev->caps.mtts_per_seg);
+ mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
} else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
}
@@ -362,8 +536,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
-
- mr->enabled = 1;
+ mr->enabled = MLX4_MR_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -373,7 +546,7 @@ err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
- mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -385,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
__be64 *mtts;
dma_addr_t dma_handle;
int i;
- int s = start_index * sizeof (u64);
- /* All MTTs must fit in the same page */
- if (start_index / (PAGE_SIZE / sizeof (u64)) !=
- (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
- return -EINVAL;
-
- if (start_index & (dev->caps.mtts_per_seg - 1))
- return -EINVAL;
+ mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
+ start_index, &dma_handle);
- mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
- s / dev->caps.mtt_entry_sz, &dma_handle);
if (!mtts)
return -ENOMEM;
@@ -412,27 +577,75 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
return 0;
}
-int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- int start_index, int npages, u64 *page_list)
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
{
+ int err = 0;
int chunk;
- int err;
+ int mtts_per_page;
+ int max_mtts_first_page;
- if (mtt->order < 0)
- return -EINVAL;
+ /* compute how many MTTs fit in the first page */
+ mtts_per_page = PAGE_SIZE / sizeof(u64);
+ max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
+ % mtts_per_page;
+
+ chunk = min_t(int, max_mtts_first_page, npages);
while (npages > 0) {
- chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
if (err)
return err;
-
npages -= chunk;
start_index += chunk;
page_list += chunk;
+
+ chunk = min_t(int, mtts_per_page, npages);
}
+ return err;
+}
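mlx4_write_mtt_chunk resolves its destination with mlx4_table_find, which returns a pointer inside a single ICM page, so __mlx4_write_mtt trims the first chunk to end exactly at a page boundary; every subsequent chunk then starts page-aligned. A standalone check of that arithmetic, assuming 4 KiB pages (512 eight-byte MTT entries each):

    #include <assert.h>

    int main(void)
    {
            int mtts_per_page = 4096 / 8;       /* 512 entries per page */
            int offset = 448, start_index = 52; /* 500 entries into a page */
            int npages = 100;

            int max_first = mtts_per_page -
                            (offset + start_index) % mtts_per_page;
            assert(max_first == 12);            /* fill up to the boundary */

            int chunk = npages < max_first ? npages : max_first;
            assert(chunk == 12);                /* the rest go page by page */
            return 0;
    }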
- return 0;
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
+{
+ struct mlx4_cmd_mailbox *mailbox = NULL;
+ __be64 *inbox = NULL;
+ int chunk;
+ int err = 0;
+ int i;
+
+ if (mtt->order < 0)
+ return -EINVAL;
+
+ if (mlx4_is_mfunc(dev)) {
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ while (npages > 0) {
+ chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
+ npages);
+ inbox[0] = cpu_to_be64(mtt->offset + start_index);
+ inbox[1] = 0;
+ for (i = 0; i < chunk; ++i)
+ inbox[i + 2] = cpu_to_be64(page_list[i] |
+ MLX4_MTT_FLAG_PRESENT);
+ err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+ if (err) {
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+ }
+
+ npages -= chunk;
+ start_index += chunk;
+ page_list += chunk;
+ }
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+ }
+
+ return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
@@ -462,21 +675,34 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mr_table *mr_table = &priv->mr_table;
int err;
+ if (!is_power_of_2(dev->caps.num_mpts))
+ return -EINVAL;
+
+ /* Nothing to do for slaves - all MR handling is forwarded
+ * to the master */
+ if (mlx4_is_slave(dev))
+ return 0;
+
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0);
if (err)
return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy,
- ilog2(dev->caps.num_mtt_segs));
+ ilog2(dev->caps.num_mtts /
+ (1 << log_mtts_per_seg)));
if (err)
goto err_buddy;
if (dev->caps.reserved_mtts) {
- if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+ priv->reserved_mtts =
+ mlx4_alloc_mtt_range(dev,
+ fls(dev->caps.reserved_mtts - 1));
+ if (priv->reserved_mtts < 0) {
mlx4_warn(dev, "MTT table of order %d is too small.\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
@@ -497,8 +723,14 @@ err_buddy:
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mr_table *mr_table = &priv->mr_table;
+ if (mlx4_is_slave(dev))
+ return;
+ if (priv->reserved_mtts >= 0)
+ mlx4_free_mtt_range(dev, priv->reserved_mtts,
+ fls(dev->caps.reserved_mtts - 1));
mlx4_buddy_cleanup(&mr_table->mtt_buddy);
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
@@ -581,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u64 mtt_seg;
+ u64 mtt_offset;
int err = -ENOMEM;
if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
@@ -601,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
if (err)
return err;
- mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
+ mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
- fmr->mr.mtt.first_seg,
+ fmr->mr.mtt.offset,
&fmr->dma_handle);
+
if (!fmr->mtts) {
err = -ENOMEM;
goto err_free;
@@ -619,6 +852,46 @@ err_free:
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
+int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
+ u32 pd, u32 access, int max_pages,
+ int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err = -ENOMEM;
+
+ if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+ return -EINVAL;
+
+ /* All MTTs must fit in the same page */
+ if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+ return -EINVAL;
+
+ fmr->page_shift = page_shift;
+ fmr->max_pages = max_pages;
+ fmr->max_maps = max_maps;
+ fmr->maps = 0;
+
+ err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
+ page_shift, &fmr->mr);
+ if (err)
+ return err;
+
+ fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+ fmr->mr.mtt.offset,
+ &fmr->dma_handle);
+ if (!fmr->mtts) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ mlx4_mr_free_reserved(dev, &fmr->mr);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
+
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -640,12 +913,32 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
if (!fmr->maps)
return;
fmr->maps = 0;
- *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
+ " failed (%d)\n", err);
+ return;
+ }
+
+ err = mlx4_HW2SW_MPT(dev, NULL,
+ key_to_hw_index(fmr->mr.key) &
+ (dev->caps.num_mpts - 1));
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err) {
+ printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
+ err);
+ return;
+ }
+ fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
@@ -654,15 +947,28 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
if (fmr->maps)
return -EBUSY;
- fmr->mr.enabled = 0;
mlx4_mr_free(dev, &fmr->mr);
+ fmr->mr.enabled = MLX4_MR_DISABLED;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
+int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+ if (fmr->maps)
+ return -EBUSY;
+
+ mlx4_mr_free_reserved(dev, &fmr->mr);
+ fmr->mr.enabled = MLX4_MR_DISABLED;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
+
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
+ MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 260ed259ce9..db4746d0dca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
@@ -85,7 +86,8 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
- (1 << 24) - 1, dev->caps.reserved_pds, 0);
+ (1 << NOT_MASKED_PD_BITS) - 1,
+ dev->caps.reserved_pds, 0);
}
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -108,13 +110,19 @@ void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
+ int offset;
+
uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
if (uar->index == -1)
return -ENOMEM;
- uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+ if (mlx4_is_slave(dev))
+ offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+ dev->caps.uar_page_size);
+ else
+ offset = uar->index;
+ uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
uar->map = NULL;
-
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
@@ -232,7 +240,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
- max(128, dev->caps.reserved_uars), 0);
+ dev->caps.reserved_uars, 0);
}
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index d942aea4927..f44ae555bf4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -44,6 +44,11 @@
#define MLX4_VLAN_VALID (1u << 31)
#define MLX4_VLAN_MASK 0xfff
+#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
+#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
+#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
+#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
+
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
int i;
@@ -70,41 +75,12 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
- __be64 *entries)
-{
- struct mlx4_cmd_mailbox *mailbox;
- u32 in_mod;
- int err;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
-
- in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
- u64 mac, int *qpn, u8 reserve)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
int err;
- if (reserve) {
- err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
- if (err) {
- mlx4_err(dev, "Failed to reserve qp for mac registration\n");
- return err;
- }
- }
qp.qpn = *qpn;
mac &= 0xffffffffffffULL;
@@ -113,16 +89,15 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
gid[5] = port;
gid[7] = MLX4_UC_STEER << 1;
- err = mlx4_qp_attach_common(dev, &qp, gid, 0,
- MLX4_PROT_ETH, MLX4_UC_STEER);
- if (err && reserve)
- mlx4_qp_release_range(dev, *qpn, 1);
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ if (err)
+ mlx4_warn(dev, "Failed Attaching Unicast\n");
return err;
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn, u8 free)
+ u64 mac, int qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
@@ -134,60 +109,164 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
gid[5] = port;
gid[7] = MLX4_UC_STEER << 1;
- mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
- if (free)
- mlx4_qp_release_range(dev, qpn, 1);
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+}
+
+static int validate_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, int index)
+{
+ int err = 0;
+
+ if (index < 0 || index >= table->max || !table->entries[index]) {
+ mlx4_warn(dev, "No valid Mac entry for the given index\n");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, u64 mac)
+{
+ int i;
+
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if ((mac & MLX4_MAC_MASK) ==
+ (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+ return i;
+ }
+ /* MAC not found */
+ return -EINVAL;
}
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_table *table = &info->mac_table;
struct mlx4_mac_entry *entry;
- int i, err = 0;
- int free = -1;
+ int index = 0;
+ int err = 0;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
- err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
- if (err)
- return err;
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
+ (unsigned long long) mac);
+ index = mlx4_register_mac(dev, port, mac);
+ if (index < 0) {
+ err = index;
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) mac);
+ return err;
+ }
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry) {
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return -ENOMEM;
- }
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+ *qpn = info->base_qpn + index;
+ return 0;
+ }
+
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
+ if (err) {
+ mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+ goto qp_err;
+ }
+
+ err = mlx4_uc_steer_add(dev, port, mac, qpn);
+ if (err)
+ goto steer_err;
+
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ entry->mac = mac;
+ err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+ if (err)
+ goto insert_err;
+ return 0;
- entry->mac = mac;
- err = radix_tree_insert(&info->mac_tree, *qpn, entry);
- if (err) {
+insert_err:
+ kfree(entry);
+
+alloc_err:
+ mlx4_uc_steer_release(dev, port, mac, *qpn);
+
+steer_err:
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+ mlx4_unregister_mac(dev, port, mac);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
+
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_entry *entry;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
+ (unsigned long long) mac);
+ mlx4_unregister_mac(dev, port, mac);
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ entry = radix_tree_lookup(&info->mac_tree, qpn);
+ if (entry) {
+ mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
+ " qpn %d\n", port,
+ (unsigned long long) mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_qp_release_range(dev, qpn, 1);
+ radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return err;
}
}
+}
+EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
- mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+ __be64 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+ in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int i, err = 0;
+ int free = -1;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
+ (unsigned long long) mac, port);
mutex_lock(&table->mutex);
- for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
- if (free < 0 && !table->refs[i]) {
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if (free < 0 && !table->entries[i]) {
free = i;
continue;
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- /* MAC already registered, increase references count */
- ++table->refs[i];
+ /* MAC already registered; must not have duplicates */
+ err = -EEXIST;
goto out;
}
}
- if (free < 0) {
- err = -ENOMEM;
- goto out;
- }
-
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
@@ -197,103 +276,103 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
}
/* Register new MAC */
- table->refs[free] = 1;
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
- table->refs[free] = 0;
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) mac);
table->entries[free] = 0;
goto out;
}
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- *qpn = info->base_qpn + free;
+ err = free;
++table->total;
out:
mutex_unlock(&table->mutex);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_register_mac);
+EXPORT_SYMBOL_GPL(__mlx4_register_mac);
-static int validate_index(struct mlx4_dev *dev,
- struct mlx4_mac_table *table, int index)
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
- int err = 0;
+ u64 out_param;
+ int err;
- if (index < 0 || index >= table->max || !table->entries[index]) {
- mlx4_warn(dev, "No valid Mac entry for the given index\n");
- err = -EINVAL;
- }
- return err;
-}
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
-static int find_index(struct mlx4_dev *dev,
- struct mlx4_mac_table *table, u64 mac)
-{
- int i;
- for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
- return i;
+ return get_param_l(&out_param);
}
- /* Mac not found */
- return -EINVAL;
+ return __mlx4_register_mac(dev, port, mac);
}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
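
mlx4_register_mac() above shows the multi-function calling convention used throughout this patch: a slave packs its arguments into the 64-bit immediate of mlx4_cmd_imm() with set_param_l()/set_param_h() and reads the result back with get_param_l(), while the native path calls the __mlx4_* worker directly. A self-contained, endianness-portable sketch of that packing convention (assuming, as the call sites suggest, that *_l is bits 0-31 and *_h is bits 32-63):

#include <assert.h>
#include <stdint.h>

/* Mirrors the mlx4 convention of carrying two 32-bit values in one
 * 64-bit command immediate: *_l is bits 0..31, *_h is bits 32..63. */
static inline void set_param_l(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | val;
}

static inline void set_param_h(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0xffffffffULL) | ((uint64_t) val << 32);
}

static inline uint32_t get_param_l(uint64_t *arg)
{
	return (uint32_t) (*arg & 0xffffffff);
}

static inline uint32_t get_param_h(uint64_t *arg)
{
	return (uint32_t) (*arg >> 32);
}

int main(void)
{
	uint64_t param = 0;

	set_param_l(&param, 8);		/* e.g. a QP count  */
	set_param_h(&param, 64);	/* e.g. an alignment */
	assert(get_param_l(&param) == 8);
	assert(get_param_h(&param) == 64);
	return 0;
}
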
+
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- int index = qpn - info->base_qpn;
- struct mlx4_mac_entry *entry;
+ int index;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (entry) {
- mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
- radix_tree_delete(&info->mac_tree, qpn);
- index = find_index(dev, table, entry->mac);
- kfree(entry);
- }
- }
+ index = find_index(dev, table, mac);
mutex_lock(&table->mutex);
if (validate_index(dev, table, index))
goto out;
- /* Check whether this address has reference count */
- if (!(--table->refs[index])) {
- table->entries[index] = 0;
- mlx4_set_port_mac_table(dev, port, table->entries);
- --table->total;
- }
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
out:
mutex_unlock(&table->mutex);
}
+EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ return;
+ }
+ __mlx4_unregister_mac(dev, port, mac);
+}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- int index = qpn - info->base_qpn;
struct mlx4_mac_entry *entry;
- int err;
+ int index = qpn - info->base_qpn;
+ int err = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
- index = find_index(dev, table, entry->mac);
- mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
- err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
- if (err || index < 0)
- return err;
+ mlx4_register_mac(dev, port, new_mac);
+ err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+ return err;
}
+ /* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex);
err = validate_index(dev, table, index);
@@ -304,7 +383,8 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) new_mac);
table->entries[index] = 0;
}
out:
@@ -312,6 +392,7 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
{
@@ -326,7 +407,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -352,7 +433,8 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
-int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+ int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int i, err = 0;
@@ -387,7 +469,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
goto out;
}
- /* Register new MAC */
+ /* Register new VLAN */
table->refs[free] = 1;
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
@@ -405,9 +487,27 @@ out:
mutex_unlock(&table->mutex);
return err;
}
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ *index = get_param_l(&out_param);
+
+ return err;
+ }
+ return __mlx4_register_vlan(dev, port, vlan, index);
+}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -432,6 +532,25 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
out:
mutex_unlock(&table->mutex);
}
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, port);
+ err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+		if (err)
+ mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
+ index);
+
+ return;
+ }
+ __mlx4_unregister_vlan(dev, port, index);
+}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
@@ -462,7 +581,8 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
if (!err)
*caps = *(__be32 *) (outbuf + 84);
mlx4_free_cmd_mailbox(dev, inmailbox);
@@ -499,7 +619,8 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
@@ -512,6 +633,139 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
return err;
}
+static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
+ u8 op_mod, struct mlx4_cmd_mailbox *inbox)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_port_info *port_info;
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+ struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+ struct mlx4_set_port_rqp_calc_context *qpn_context;
+ struct mlx4_set_port_general_context *gen_context;
+ int reset_qkey_viols;
+ int port;
+ int is_eth;
+ u32 in_modifier;
+ u32 promisc;
+ u16 mtu, prev_mtu;
+ int err;
+ int i;
+ __be32 agg_cap_mask;
+ __be32 slave_cap_mask;
+ __be32 new_cap_mask;
+
+ port = in_mod & 0xff;
+ in_modifier = in_mod >> 8;
+ is_eth = op_mod;
+ port_info = &priv->port[port];
+
+ /* Slaves cannot perform SET_PORT operations except changing MTU */
+ if (is_eth) {
+ if (slave != dev->caps.function &&
+ in_modifier != MLX4_SET_PORT_GENERAL) {
+ mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
+ slave);
+ return -EINVAL;
+ }
+ switch (in_modifier) {
+ case MLX4_SET_PORT_RQP_CALC:
+ qpn_context = inbox->buf;
+ qpn_context->base_qpn =
+ cpu_to_be32(port_info->base_qpn);
+ qpn_context->n_mac = 0x7;
+ promisc = be32_to_cpu(qpn_context->promisc) >>
+ SET_PORT_PROMISC_SHIFT;
+ qpn_context->promisc = cpu_to_be32(
+ promisc << SET_PORT_PROMISC_SHIFT |
+ port_info->base_qpn);
+ promisc = be32_to_cpu(qpn_context->mcast) >>
+ SET_PORT_MC_PROMISC_SHIFT;
+ qpn_context->mcast = cpu_to_be32(
+ promisc << SET_PORT_MC_PROMISC_SHIFT |
+ port_info->base_qpn);
+ break;
+ case MLX4_SET_PORT_GENERAL:
+ gen_context = inbox->buf;
+			/* MTU is configured as the max MTU among all
+			 * the functions on the port. */
+ mtu = be16_to_cpu(gen_context->mtu);
+ mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
+ prev_mtu = slave_st->mtu[port];
+ slave_st->mtu[port] = mtu;
+ if (mtu > master->max_mtu[port])
+ master->max_mtu[port] = mtu;
+ if (mtu < prev_mtu && prev_mtu ==
+ master->max_mtu[port]) {
+ slave_st->mtu[port] = mtu;
+ master->max_mtu[port] = mtu;
+ for (i = 0; i < dev->num_slaves; i++) {
+ master->max_mtu[port] =
+ max(master->max_mtu[port],
+ master->slave_state[i].mtu[port]);
+ }
+ }
+
+ gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+ break;
+ }
+ return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+ }
+
+ /* For IB, we only consider:
+ * - The capability mask, which is set to the aggregate of all
+ * slave function capabilities
+	 * - The QKey violation counter - reset according to each request.
+ */
+
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
+ new_cap_mask = ((__be32 *) inbox->buf)[2];
+ } else {
+ reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
+ new_cap_mask = ((__be32 *) inbox->buf)[1];
+ }
+
+ agg_cap_mask = 0;
+ slave_cap_mask =
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
+ for (i = 0; i < dev->num_slaves; i++)
+ agg_cap_mask |=
+ priv->mfunc.master.slave_state[i].ib_cap_mask[port];
+
+ /* only clear mailbox for guests. Master may be setting
+ * MTU or PKEY table size
+ */
+ if (slave != dev->caps.function)
+ memset(inbox->buf, 0, 256);
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ *(u8 *) inbox->buf = !!reset_qkey_viols << 6;
+ ((__be32 *) inbox->buf)[2] = agg_cap_mask;
+ } else {
+ ((u8 *) inbox->buf)[3] = !!reset_qkey_viols;
+ ((__be32 *) inbox->buf)[1] = agg_cap_mask;
+ }
+
+ err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ if (err)
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
+ slave_cap_mask;
+ return err;
+}
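
In the MLX4_SET_PORT_GENERAL branch above, the MTU programmed into the port is the maximum requested by any function, so when the function holding the maximum lowers its request the master rescans every slave. A standalone sketch of that recomputation (array sizes and values are illustrative only):

#include <stdio.h>

#define NUM_SLAVES 4

static unsigned short slave_mtu[NUM_SLAVES] = { 1500, 9000, 1500, 4096 };
static unsigned short port_max_mtu = 9000;

/* A slave requests a new MTU; the effective port MTU stays the maximum
 * over all functions, so a decrease by the current holder of the
 * maximum forces a rescan (mirrors the loop in the code above). */
static void slave_set_mtu(int slave, unsigned short mtu)
{
	unsigned short prev = slave_mtu[slave];
	int i;

	slave_mtu[slave] = mtu;
	if (mtu > port_max_mtu) {
		port_max_mtu = mtu;
	} else if (mtu < prev && prev == port_max_mtu) {
		port_max_mtu = mtu;
		for (i = 0; i < NUM_SLAVES; i++)
			if (slave_mtu[i] > port_max_mtu)
				port_max_mtu = slave_mtu[i];
	}
}

int main(void)
{
	slave_set_mtu(1, 1500);		/* slave 1 drops from 9000 */
	printf("port MTU is now %d\n", port_max_mtu);	/* 4096 */
	return 0;
}
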
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
+ vhcr->op_modifier, inbox);
+}
+
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -528,8 +782,145 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ int err;
+ u32 in_mod;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->flags = SET_PORT_GEN_ALL_VALID;
+ context->mtu = cpu_to_be16(mtu);
+ context->pptx = (pptx * (!pfctx)) << 7;
+ context->pfctx = pfctx;
+ context->pprx = (pprx * (!pfcrx)) << 7;
+ context->pfcrx = pfcrx;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+EXPORT_SYMBOL(mlx4_SET_PORT_general);
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_rqp_calc_context *context;
+ int err;
+ u32 in_mod;
+ u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+ MCAST_DIRECT : MCAST_DEFAULT;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+ return 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->base_qpn = cpu_to_be32(base_qpn);
+ context->n_mac = dev->caps.log_num_macs;
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+ base_qpn);
+ context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+ base_qpn);
+ context->intra_no_vlan = 0;
+ context->no_vlan = MLX4_NO_VLAN_IDX;
+ context->intra_vlan_miss = 0;
+ context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+ in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+	return 0;
+}
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+ u64 mac, u64 clear, u8 mode)
+{
+ return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+ MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
+
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+	return 0;
+}
+
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
+ u32 in_mod, struct mlx4_cmd_mailbox *outbox)
+{
+ return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+}
+
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ if (slave != dev->caps.function)
+ return 0;
+ return mlx4_common_dump_eth_stats(dev, slave,
+ vhcr->in_modifier, outbox);
+}
+
+void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
+{
+ if (!mlx4_is_mfunc(dev)) {
+ *stats_bitmap = 0;
+ return;
+ }
+
+ *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
+ MLX4_STATS_TRAFFIC_DROPS_MASK |
+ MLX4_STATS_PORT_COUNTERS_MASK);
+
+ if (mlx4_is_master(dev))
+ *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
+}
+EXPORT_SYMBOL(mlx4_set_stats_bitmap);
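
mlx4_set_stats_bitmap() builds the bitmap by OR-ing counter-group masks, reserving the error-counter group for the master. A toy sketch of how such a bitmap is composed and later consulted (the mask values below are hypothetical, not the real MLX4_STATS_* constants):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mask values; the real ones live in the mlx4 headers. */
#define STATS_TRAFFIC_COUNTERS	(1ULL << 0)
#define STATS_TRAFFIC_DROPS	(1ULL << 1)
#define STATS_PORT_COUNTERS	(1ULL << 2)
#define STATS_ERROR_COUNTERS	(1ULL << 3)

int main(void)
{
	int is_mfunc = 1, is_master = 1;
	uint64_t bitmap = 0;

	if (is_mfunc) {
		bitmap = STATS_TRAFFIC_COUNTERS | STATS_TRAFFIC_DROPS |
			 STATS_PORT_COUNTERS;
		if (is_master)
			bitmap |= STATS_ERROR_COUNTERS;	/* master only */
	}
	/* A reporting loop would skip counter groups whose bit is clear. */
	printf("report errors? %s\n",
	       (bitmap & STATS_ERROR_COUNTERS) ? "yes" : "no");
	return 0;
}
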
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b967647d0c7..1129677daa6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -98,8 +98,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
- profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
- profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
+ profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz;
+ profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev);
profile[MLX4_RES_QP].num = request->num_qp;
profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
@@ -110,7 +110,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
- profile[MLX4_RES_MTT].num = request->num_mtt;
+ profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
profile[MLX4_RES_MCG].num = request->num_mcg;
for (i = 0; i < MLX4_RES_NUM; ++i) {
@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->cmpt_base = profile[i].start;
break;
case MLX4_RES_MTT:
- dev->caps.num_mtt_segs = profile[i].num;
+ dev->caps.num_mtts = profile[i].num;
priv->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start;
break;
@@ -218,7 +218,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
dev->caps.num_mgms = profile[i].num >> 1;
dev->caps.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
- init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+ init_hca->log_mc_entry_sz =
+ ilog2(mlx4_get_mgm_entry_size(dev));
init_hca->log_mc_table_sz = profile[i].log_num;
init_hca->log_mc_hash_sz = profile[i].log_num - 1;
break;
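
The profile.c hunk above drops per-segment MTT sizing: each table entry is now dev_cap->mtt_entry_sz bytes on its own, and the requested count grows by a factor of 2^log_mtts_per_seg. A quick sketch of the arithmetic (all numbers illustrative):

#include <stdio.h>

int main(void)
{
	int log_mtts_per_seg = 3;		/* module parameter, example value */
	int requested_segments = 1 << 20;	/* request->num_mtt                */
	int mtt_entry_sz = 8;			/* bytes per MTT entry             */

	/* Old scheme: count segments, each mtts_per_seg * entry_sz bytes.
	 * New scheme: count individual entries, each entry_sz bytes.     */
	long long num_mtts = (long long) requested_segments << log_mtts_per_seg;
	long long table_bytes = num_mtts * mtt_entry_sz;

	printf("%lld MTT entries, %lld bytes of ICM\n", num_mtts, table_bytes);
	return 0;
}
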
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 15f870cb259..738f950a1ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,6 +35,8 @@
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/init.h>
+
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
@@ -55,7 +57,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
spin_unlock(&qp_table->lock);
if (!qp) {
- mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn);
return;
}
@@ -65,10 +67,17 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
complete(&qp->free);
}
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
- struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
- int sqd_event, struct mlx4_qp *qp)
+static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+ return qp->qpn >= dev->caps.sqp_start &&
+ qp->qpn <= dev->caps.sqp_start + 1;
+}
+
+static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context,
+ enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp, int native)
{
static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
[MLX4_QP_STATE_RST] = {
@@ -110,16 +119,26 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
}
};
+ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int ret = 0;
+ u8 port;
if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
!op[cur_state][new_state])
return -EINVAL;
- if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
- return mlx4_cmd(dev, 0, qp->qpn, 2,
- MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+ if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
+ ret = mlx4_cmd(dev, 0, qp->qpn, 2,
+ MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
+ if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
+ cur_state != MLX4_QP_STATE_RST &&
+ is_qp0(dev, qp)) {
+ port = (qp->qpn & 1) + 1;
+ priv->mfunc.master.qp0_state[port].qp0_active = 0;
+ }
+ return ret;
+ }
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -132,107 +151,218 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
}
+ port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ context->pri_path.sched_queue = (context->pri_path.sched_queue &
+ 0xc3);
+
*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
memcpy(mailbox->buf + 8, context, sizeof *context);
((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
cpu_to_be32(qp->qpn);
- ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+ ret = mlx4_cmd(dev, mailbox->dma,
+ qp->qpn | (!!sqd_event << 31),
new_state == MLX4_QP_STATE_RST ? 2 : 0,
- op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+ op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context,
+ enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp)
+{
+ return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
+ optpar, sqd_event, qp, 0);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
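
__mlx4_qp_modify() above picks the firmware opcode out of a two-dimensional table indexed by (current state, next state), with a zero entry marking an illegal transition. A compact sketch of that table-driven state machine; the state names and opcode values here are arbitrary stand-ins, not the real mlx4 ones:

#include <stdio.h>

enum state { ST_RST, ST_INIT, ST_RTR, ST_RTS, NUM_STATES };

/* 0 marks an illegal transition; a nonzero entry is the command
 * opcode to issue (values are made-up placeholders). */
static const unsigned short op[NUM_STATES][NUM_STATES] = {
	[ST_RST]  = { [ST_INIT] = 0x19 },
	[ST_INIT] = { [ST_INIT] = 0x19, [ST_RTR] = 0x1a },
	[ST_RTR]  = { [ST_RTS]  = 0x1b },
};

static int modify(enum state cur, enum state next)
{
	if (cur >= NUM_STATES || next >= NUM_STATES || !op[cur][next])
		return -1;			/* -EINVAL in the driver */
	printf("issue opcode 0x%x\n", op[cur][next]);
	return 0;
}

int main(void)
{
	modify(ST_RST, ST_INIT);		/* legal: prints the opcode */
	if (modify(ST_RST, ST_RTS))		/* illegal: rejected        */
		puts("RST->RTS rejected");
	return 0;
}
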
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- int qpn;
- qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
- if (qpn == -1)
+ *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+ if (*base == -1)
return -ENOMEM;
- *base = qpn;
return 0;
}
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, cnt);
+ set_param_h(&in_param, align);
+ err = mlx4_cmd_imm(dev, in_param, &out_param,
+ RES_QP, RES_OP_RESERVE,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
+
+ *base = get_param_l(&out_param);
+ return 0;
+ }
+ return __mlx4_qp_reserve_range(dev, cnt, align, base);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- if (base_qpn < dev->caps.sqp_start + 8)
- return;
+ if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
+ return;
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, base_qpn);
+ set_param_h(&in_param, cnt);
+ err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err) {
+ mlx4_warn(dev, "Failed to release qp range"
+ " base:%d cnt:%d\n", base_qpn, cnt);
+ }
+ } else
+ __mlx4_qp_release_range(dev, base_qpn, cnt);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- if (!qpn)
- return -EINVAL;
-
- qp->qpn = qpn;
-
- err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
if (err)
goto err_put_qp;
- err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
if (err)
goto err_put_auxc;
- err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
if (err)
goto err_put_altc;
- err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
if (err)
goto err_put_rdmarc;
- spin_lock_irq(&qp_table->lock);
- err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
- spin_unlock_irq(&qp_table->lock);
- if (err)
- goto err_put_cmpt;
-
- atomic_set(&qp->refcount, 1);
- init_completion(&qp->free);
-
return 0;
-err_put_cmpt:
- mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
err_put_rdmarc:
- mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
err_put_altc:
- mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qpn);
err_put_auxc:
- mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qpn);
err_put_qp:
- mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qpn);
err_out:
return err;
}
+
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+ u64 param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&param, qpn);
+ return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
+ MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ }
+ return __mlx4_qp_alloc_icm(dev, qpn);
+}
+
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+ mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qpn);
+}
+
+static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, qpn);
+ if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+ } else
+ __mlx4_qp_free_icm(dev, qpn);
+}
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int err;
+
+ if (!qpn)
+ return -EINVAL;
+
+ qp->qpn = qpn;
+
+ err = mlx4_qp_alloc_icm(dev, qpn);
+ if (err)
+ return err;
+
+ spin_lock_irq(&qp_table->lock);
+ err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
+ (dev->caps.num_qps - 1), qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (err)
+ goto err_icm;
+
+ atomic_set(&qp->refcount, 1);
+ init_completion(&qp->free);
+
+ return 0;
+
+err_icm:
+ mlx4_qp_free_icm(dev, qpn);
+ return err;
+}
+
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
@@ -248,24 +378,18 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
- struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
wait_for_completion(&qp->free);
- mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+ mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_init_qp_table(struct mlx4_dev *dev)
@@ -276,6 +400,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
/*
* We reserve 2 extra QPs per port for the special QPs. The
@@ -327,6 +453,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
+
mlx4_CONF_SPECIAL_QP(dev, 0);
mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}
@@ -342,7 +471,8 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
- MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
if (!err)
memcpy(context, mailbox->buf + 8, sizeof *context);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
new file mode 100644
index 00000000000..bfdb7af19e4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -0,0 +1,3074 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_MAC_MASK 0x7fffffffffffffffULL
+#define ETH_ALEN 6
+
+struct mac_res {
+ struct list_head list;
+ u64 mac;
+ u8 port;
+};
+
+struct res_common {
+ struct list_head list;
+ u32 res_id;
+ int owner;
+ int state;
+ int from_state;
+ int to_state;
+ int removing;
+};
+
+enum {
+ RES_ANY_BUSY = 1
+};
+
+struct res_gid {
+ struct list_head list;
+ u8 gid[16];
+ enum mlx4_protocol prot;
+ enum mlx4_steer_type steer;
+};
+
+enum res_qp_states {
+ RES_QP_BUSY = RES_ANY_BUSY,
+
+ /* QP number was allocated */
+ RES_QP_RESERVED,
+
+ /* ICM memory for QP context was mapped */
+ RES_QP_MAPPED,
+
+ /* QP is in hw ownership */
+ RES_QP_HW
+};
+
+static inline const char *qp_states_str(enum res_qp_states state)
+{
+ switch (state) {
+ case RES_QP_BUSY: return "RES_QP_BUSY";
+ case RES_QP_RESERVED: return "RES_QP_RESERVED";
+ case RES_QP_MAPPED: return "RES_QP_MAPPED";
+ case RES_QP_HW: return "RES_QP_HW";
+ default: return "Unknown";
+ }
+}
+
+struct res_qp {
+ struct res_common com;
+ struct res_mtt *mtt;
+ struct res_cq *rcq;
+ struct res_cq *scq;
+ struct res_srq *srq;
+ struct list_head mcg_list;
+ spinlock_t mcg_spl;
+ int local_qpn;
+};
+
+enum res_mtt_states {
+ RES_MTT_BUSY = RES_ANY_BUSY,
+ RES_MTT_ALLOCATED,
+};
+
+static inline const char *mtt_states_str(enum res_mtt_states state)
+{
+ switch (state) {
+ case RES_MTT_BUSY: return "RES_MTT_BUSY";
+ case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
+ default: return "Unknown";
+ }
+}
+
+struct res_mtt {
+ struct res_common com;
+ int order;
+ atomic_t ref_count;
+};
+
+enum res_mpt_states {
+ RES_MPT_BUSY = RES_ANY_BUSY,
+ RES_MPT_RESERVED,
+ RES_MPT_MAPPED,
+ RES_MPT_HW,
+};
+
+struct res_mpt {
+ struct res_common com;
+ struct res_mtt *mtt;
+ int key;
+};
+
+enum res_eq_states {
+ RES_EQ_BUSY = RES_ANY_BUSY,
+ RES_EQ_RESERVED,
+ RES_EQ_HW,
+};
+
+struct res_eq {
+ struct res_common com;
+ struct res_mtt *mtt;
+};
+
+enum res_cq_states {
+ RES_CQ_BUSY = RES_ANY_BUSY,
+ RES_CQ_ALLOCATED,
+ RES_CQ_HW,
+};
+
+struct res_cq {
+ struct res_common com;
+ struct res_mtt *mtt;
+ atomic_t ref_count;
+};
+
+enum res_srq_states {
+ RES_SRQ_BUSY = RES_ANY_BUSY,
+ RES_SRQ_ALLOCATED,
+ RES_SRQ_HW,
+};
+
+static inline const char *srq_states_str(enum res_srq_states state)
+{
+ switch (state) {
+ case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
+ case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
+ case RES_SRQ_HW: return "RES_SRQ_HW";
+ default: return "Unknown";
+ }
+}
+
+struct res_srq {
+ struct res_common com;
+ struct res_mtt *mtt;
+ struct res_cq *cq;
+ atomic_t ref_count;
+};
+
+enum res_counter_states {
+ RES_COUNTER_BUSY = RES_ANY_BUSY,
+ RES_COUNTER_ALLOCATED,
+};
+
+static inline const char *counter_states_str(enum res_counter_states state)
+{
+ switch (state) {
+ case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
+ case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
+ default: return "Unknown";
+ }
+}
+
+struct res_counter {
+ struct res_common com;
+ int port;
+};
+
+/* For Debug uses */
+static const char *ResourceType(enum mlx4_resource rt)
+{
+ switch (rt) {
+ case RES_QP: return "RES_QP";
+ case RES_CQ: return "RES_CQ";
+ case RES_SRQ: return "RES_SRQ";
+ case RES_MPT: return "RES_MPT";
+ case RES_MTT: return "RES_MTT";
+ case RES_MAC: return "RES_MAC";
+ case RES_EQ: return "RES_EQ";
+ case RES_COUNTER: return "RES_COUNTER";
+ default: return "Unknown resource type !!!";
+ };
+}
+
+int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+ int t;
+
+ priv->mfunc.master.res_tracker.slave_list =
+ kzalloc(dev->num_slaves * sizeof(struct slave_list),
+ GFP_KERNEL);
+ if (!priv->mfunc.master.res_tracker.slave_list)
+ return -ENOMEM;
+
+	for (i = 0; i < dev->num_slaves; i++) {
+ for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
+ INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
+ slave_list[i].res_list[t]);
+ mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ }
+
+ mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
+ dev->num_slaves);
+	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
+ INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
+ GFP_ATOMIC|__GFP_NOWARN);
+
+ spin_lock_init(&priv->mfunc.master.res_tracker.lock);
+	return 0;
+}
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ if (priv->mfunc.master.res_tracker.slave_list) {
+		for (i = 0; i < dev->num_slaves; i++)
+ mlx4_delete_all_resources_for_slave(dev, i);
+
+ kfree(priv->mfunc.master.res_tracker.slave_list);
+ }
+}
+
+static void update_ud_gid(struct mlx4_dev *dev,
+ struct mlx4_qp_context *qp_ctx, u8 slave)
+{
+ u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+
+ if (MLX4_QP_ST_UD == ts)
+ qp_ctx->pri_path.mgid_index = 0x80 | slave;
+
+ mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
+ slave, qp_ctx->pri_path.mgid_index);
+}
+
+static int mpt_mask(struct mlx4_dev *dev)
+{
+ return dev->caps.num_mpts - 1;
+}
+
+static void *find_res(struct mlx4_dev *dev, int res_id,
+ enum mlx4_resource type)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+ res_id);
+}
+
+static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+ enum mlx4_resource type,
+ void *res)
+{
+ struct res_common *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, res_id, type);
+ if (!r) {
+		err = -ENOENT;
+ goto exit;
+ }
+
+ if (r->state == RES_ANY_BUSY) {
+ err = -EBUSY;
+ goto exit;
+ }
+
+ if (r->owner != slave) {
+ err = -EPERM;
+ goto exit;
+ }
+
+ r->from_state = r->state;
+ r->state = RES_ANY_BUSY;
+ mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+ ResourceType(type), r->res_id);
+
+ if (res)
+ *((struct res_common **)res) = r;
+
+exit:
+ spin_unlock_irq(mlx4_tlock(dev));
+ return err;
+}
+
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+ enum mlx4_resource type,
+ int res_id, int *slave)
+{
+ struct res_common *r;
+ int err = -ENOENT;
+ int id = res_id;
+
+ if (type == RES_QP)
+ id &= 0x7fffff;
+ spin_lock(mlx4_tlock(dev));
+
+ r = find_res(dev, id, type);
+ if (r) {
+ *slave = r->owner;
+ err = 0;
+ }
+ spin_unlock(mlx4_tlock(dev));
+
+ return err;
+}
+
+static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+ enum mlx4_resource type)
+{
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, res_id, type);
+ if (r)
+ r->state = r->from_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
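
get_res()/put_res() implement a check-out protocol: under the tracker lock the resource is verified to be idle and owned by the calling slave, parked in the busy state with its previous state remembered, and put_res() later restores it. A userspace sketch of the same protocol, with a pthread mutex standing in for the tracker spinlock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define ST_BUSY		1
#define ST_ALLOCATED	2

struct res { int owner; int state; int from_state; };

static pthread_mutex_t tlock = PTHREAD_MUTEX_INITIALIZER;

static int get_res(struct res *r, int slave)
{
	int err = 0;

	pthread_mutex_lock(&tlock);
	if (r->state == ST_BUSY)
		err = -EBUSY;		/* someone else checked it out      */
	else if (r->owner != slave)
		err = -EPERM;		/* not this function's resource     */
	else {
		r->from_state = r->state;
		r->state = ST_BUSY;	/* park it while we work on it      */
	}
	pthread_mutex_unlock(&tlock);
	return err;
}

static void put_res(struct res *r)
{
	pthread_mutex_lock(&tlock);
	r->state = r->from_state;	/* restore the pre-busy state       */
	pthread_mutex_unlock(&tlock);
}

int main(void)
{
	struct res r = { .owner = 3, .state = ST_ALLOCATED };

	if (!get_res(&r, 3)) {
		printf("busy: state=%d\n", r.state);
		put_res(&r);
	}
	printf("restored: state=%d\n", r.state);
	return 0;
}
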
+
+static struct res_common *alloc_qp_tr(int id)
+{
+ struct res_qp *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_QP_RESERVED;
+ ret->local_qpn = id;
+ INIT_LIST_HEAD(&ret->mcg_list);
+ spin_lock_init(&ret->mcg_spl);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_mtt_tr(int id, int order)
+{
+ struct res_mtt *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->order = order;
+ ret->com.state = RES_MTT_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_mpt_tr(int id, int key)
+{
+ struct res_mpt *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_MPT_RESERVED;
+ ret->key = key;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_eq_tr(int id)
+{
+ struct res_eq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_EQ_RESERVED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_cq_tr(int id)
+{
+ struct res_cq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_CQ_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_srq_tr(int id)
+{
+ struct res_srq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_SRQ_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_counter_tr(int id)
+{
+ struct res_counter *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_COUNTER_ALLOCATED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+ int extra)
+{
+ struct res_common *ret;
+
+ switch (type) {
+ case RES_QP:
+ ret = alloc_qp_tr(id);
+ break;
+ case RES_MPT:
+ ret = alloc_mpt_tr(id, extra);
+ break;
+ case RES_MTT:
+ ret = alloc_mtt_tr(id, extra);
+ break;
+ case RES_EQ:
+ ret = alloc_eq_tr(id);
+ break;
+ case RES_CQ:
+ ret = alloc_cq_tr(id);
+ break;
+ case RES_SRQ:
+ ret = alloc_srq_tr(id);
+ break;
+ case RES_MAC:
+ printk(KERN_ERR "implementation missing\n");
+ return NULL;
+ case RES_COUNTER:
+ ret = alloc_counter_tr(id);
+ break;
+
+ default:
+ return NULL;
+ }
+ if (ret)
+ ret->owner = slave;
+
+ return ret;
+}
+
+static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+ enum mlx4_resource type, int extra)
+{
+ int i;
+ int err;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct res_common **res_arr;
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct radix_tree_root *root = &tracker->res_tree[type];
+
+ res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+ if (!res_arr)
+ return -ENOMEM;
+
+ for (i = 0; i < count; ++i) {
+ res_arr[i] = alloc_tr(base + i, type, slave, extra);
+ if (!res_arr[i]) {
+ for (--i; i >= 0; --i)
+ kfree(res_arr[i]);
+
+ kfree(res_arr);
+ return -ENOMEM;
+ }
+ }
+
+ spin_lock_irq(mlx4_tlock(dev));
+ for (i = 0; i < count; ++i) {
+ if (find_res(dev, base + i, type)) {
+ err = -EEXIST;
+ goto undo;
+ }
+ err = radix_tree_insert(root, base + i, res_arr[i]);
+ if (err)
+ goto undo;
+ list_add_tail(&res_arr[i]->list,
+ &tracker->slave_list[slave].res_list[type]);
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(res_arr);
+
+ return 0;
+
+undo:
+	for (--i; i >= 0; --i) {
+		radix_tree_delete(&tracker->res_tree[type], base + i);
+		list_del(&res_arr[i]->list);
+	}
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ for (i = 0; i < count; ++i)
+ kfree(res_arr[i]);
+
+ kfree(res_arr);
+
+ return err;
+}
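
add_res_range() has to stay transactional: if publishing entry i fails, exactly the entries already published (indices 0..i-1, i.e. keys base..base+i-1) must be removed before the error is returned. A toy version of that insert-all-or-roll-back loop over a plain array:

#include <stdio.h>

#define N 8
static int table[N];	/* 0 = empty; stands in for the radix tree */

static int insert(int key)      { if (table[key]) return -1; table[key] = 1; return 0; }
static void remove_key(int key) { table[key] = 0; }

/* Insert count consecutive keys; on failure, remove only those
 * already inserted (indices 0..i-1), then report the error. */
static int add_range(int base, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		if (insert(base + i)) {
			for (--i; i >= 0; --i)
				remove_key(base + i);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	table[5] = 1;				/* pre-existing entry forces a clash */
	printf("add_range: %d\n", add_range(3, 4));	/* fails at key 5 */
	printf("key 3 rolled back? %s\n", table[3] ? "no" : "yes");
	return 0;
}
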
+
+static int remove_qp_ok(struct res_qp *res)
+{
+ if (res->com.state == RES_QP_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_QP_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_mtt_ok(struct res_mtt *res, int order)
+{
+ if (res->com.state == RES_MTT_BUSY ||
+ atomic_read(&res->ref_count)) {
+ printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
+ __func__, __LINE__,
+ mtt_states_str(res->com.state),
+ atomic_read(&res->ref_count));
+ return -EBUSY;
+ } else if (res->com.state != RES_MTT_ALLOCATED)
+ return -EPERM;
+ else if (res->order != order)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int remove_mpt_ok(struct res_mpt *res)
+{
+ if (res->com.state == RES_MPT_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_MPT_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_eq_ok(struct res_eq *res)
+{
+	if (res->com.state == RES_EQ_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_EQ_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_counter_ok(struct res_counter *res)
+{
+ if (res->com.state == RES_COUNTER_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_COUNTER_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_cq_ok(struct res_cq *res)
+{
+ if (res->com.state == RES_CQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_CQ_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_srq_ok(struct res_srq *res)
+{
+ if (res->com.state == RES_SRQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_SRQ_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
+{
+ switch (type) {
+ case RES_QP:
+ return remove_qp_ok((struct res_qp *)res);
+ case RES_CQ:
+ return remove_cq_ok((struct res_cq *)res);
+ case RES_SRQ:
+ return remove_srq_ok((struct res_srq *)res);
+ case RES_MPT:
+ return remove_mpt_ok((struct res_mpt *)res);
+ case RES_MTT:
+ return remove_mtt_ok((struct res_mtt *)res, extra);
+ case RES_MAC:
+ return -ENOSYS;
+ case RES_EQ:
+ return remove_eq_ok((struct res_eq *)res);
+ case RES_COUNTER:
+ return remove_counter_ok((struct res_counter *)res);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+ enum mlx4_resource type, int extra)
+{
+ int i;
+ int err;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ for (i = base; i < base + count; ++i) {
+ r = radix_tree_lookup(&tracker->res_tree[type], i);
+ if (!r) {
+ err = -ENOENT;
+ goto out;
+ }
+ if (r->owner != slave) {
+ err = -EPERM;
+ goto out;
+ }
+ err = remove_ok(r, type, extra);
+ if (err)
+ goto out;
+ }
+
+ for (i = base; i < base + count; ++i) {
+ r = radix_tree_lookup(&tracker->res_tree[type], i);
+ radix_tree_delete(&tracker->res_tree[type], i);
+ list_del(&r->list);
+ kfree(r);
+ }
+ err = 0;
+
+out:
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
+ enum res_qp_states state, struct res_qp **qp,
+ int alloc)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_qp *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_QP_BUSY:
+ mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+ __func__, r->com.res_id);
+ err = -EBUSY;
+ break;
+
+ case RES_QP_RESERVED:
+ if (r->com.state == RES_QP_MAPPED && !alloc)
+ break;
+
+ mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+ err = -EINVAL;
+ break;
+
+ case RES_QP_MAPPED:
+ if ((r->com.state == RES_QP_RESERVED && alloc) ||
+ r->com.state == RES_QP_HW)
+ break;
+ else {
+ mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+ r->com.res_id);
+ err = -EINVAL;
+ }
+
+ break;
+
+ case RES_QP_HW:
+ if (r->com.state != RES_QP_MAPPED)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_QP_BUSY;
+ if (qp)
+ *qp = (struct res_qp *)r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_mpt_states state, struct res_mpt **mpt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_mpt *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_MPT_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_RESERVED:
+ if (r->com.state != RES_MPT_MAPPED)
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_MAPPED:
+ if (r->com.state != RES_MPT_RESERVED &&
+ r->com.state != RES_MPT_HW)
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_HW:
+ if (r->com.state != RES_MPT_MAPPED)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_MPT_BUSY;
+ if (mpt)
+ *mpt = (struct res_mpt *)r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_eq_states state, struct res_eq **eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_eq *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_EQ_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_EQ_RESERVED:
+ if (r->com.state != RES_EQ_HW)
+ err = -EINVAL;
+ break;
+
+ case RES_EQ_HW:
+ if (r->com.state != RES_EQ_RESERVED)
+ err = -EINVAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_EQ_BUSY;
+ if (eq)
+ *eq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
+ enum res_cq_states state, struct res_cq **cq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_cq *r;
+ int err;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_CQ_BUSY:
+ err = -EBUSY;
+ break;
+
+ case RES_CQ_ALLOCATED:
+ if (r->com.state != RES_CQ_HW)
+ err = -EINVAL;
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ else
+ err = 0;
+ break;
+
+ case RES_CQ_HW:
+ if (r->com.state != RES_CQ_ALLOCATED)
+ err = -EINVAL;
+ else
+ err = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_CQ_BUSY;
+ if (cq)
+ *cq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_cq_states state, struct res_srq **srq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_srq *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_SRQ_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_SRQ_ALLOCATED:
+ if (r->com.state != RES_SRQ_HW)
+ err = -EINVAL;
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ break;
+
+ case RES_SRQ_HW:
+ if (r->com.state != RES_SRQ_ALLOCATED)
+ err = -EINVAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_SRQ_BUSY;
+ if (srq)
+ *srq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static void res_abort_move(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[type], id);
+ if (r && (r->owner == slave))
+ r->state = r->from_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void res_end_move(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[type], id);
+ if (r && (r->owner == slave))
+ r->state = r->to_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
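
res_abort_move()/res_end_move() complete the two-phase transition begun by the *_res_start_move_to() helpers: the resource waits in the busy state with from_state/to_state recorded while the firmware command runs, after which the move is either committed or rolled back. A minimal single-threaded sketch of the pattern (locking omitted; state names are stand-ins):

#include <stdio.h>

#define ST_BUSY		1
#define ST_RESERVED	2
#define ST_MAPPED	3

struct res { int state, from_state, to_state; };

/* Phase 1: validate the transition and park the resource in BUSY. */
static int start_move(struct res *r, int to)
{
	if (r->state == ST_BUSY)
		return -1;
	if (to == ST_MAPPED && r->state != ST_RESERVED)
		return -1;			/* illegal transition */
	r->from_state = r->state;
	r->to_state = to;
	r->state = ST_BUSY;
	return 0;
}

/* Phase 2a: the firmware command succeeded, commit. */
static void end_move(struct res *r)   { r->state = r->to_state; }
/* Phase 2b: the firmware command failed, roll back. */
static void abort_move(struct res *r) { r->state = r->from_state; }

int main(void)
{
	struct res r = { .state = ST_RESERVED };

	if (!start_move(&r, ST_MAPPED)) {
		int fw_err = 0;			/* pretend the command ran */
		if (fw_err)
			abort_move(&r);
		else
			end_move(&r);
	}
	printf("final state %d\n", r.state);	/* 3 == ST_MAPPED */
	return 0;
}
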
+
+static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
+{
+ return mlx4_is_qp_reserved(dev, qpn);
+}
+
+static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err;
+ int count;
+ int align;
+ int base;
+ int qpn;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ count = get_param_l(&in_param);
+ align = get_param_h(&in_param);
+ err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ if (err)
+ return err;
+
+ err = add_res_range(dev, slave, base, count, RES_QP, 0);
+ if (err) {
+ __mlx4_qp_release_range(dev, base, count);
+ return err;
+ }
+ set_param_l(out_param, base);
+ break;
+ case RES_OP_MAP_ICM:
+ qpn = get_param_l(&in_param) & 0x7fffff;
+ if (valid_reserved(dev, slave, qpn)) {
+ err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
+ if (err)
+ return err;
+ }
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
+ NULL, 1);
+ if (err)
+ return err;
+
+ if (!valid_reserved(dev, slave, qpn)) {
+ err = __mlx4_qp_alloc_icm(dev, qpn);
+ if (err) {
+ res_abort_move(dev, slave, RES_QP, qpn);
+ return err;
+ }
+ }
+
+ res_end_move(dev, slave, RES_QP, qpn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int base;
+ int order;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ order = get_param_l(&in_param);
+ base = __mlx4_alloc_mtt_range(dev, order);
+ if (base == -1)
+ return -ENOMEM;
+
+ err = add_res_range(dev, slave, base, 1, RES_MTT, order);
+ if (err)
+ __mlx4_free_mtt_range(dev, base, order);
+ else
+ set_param_l(out_param, base);
+
+ return err;
+}
+
+static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int index;
+ int id;
+ struct res_mpt *mpt;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ index = __mlx4_mr_reserve(dev);
+ if (index == -1)
+ break;
+ id = index & mpt_mask(dev);
+
+ err = add_res_range(dev, slave, id, 1, RES_MPT, index);
+ if (err) {
+ __mlx4_mr_release(dev, index);
+ break;
+ }
+ set_param_l(out_param, index);
+ break;
+ case RES_OP_MAP_ICM:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = __mlx4_mr_alloc_icm(dev, mpt->key);
+ if (err) {
+ res_abort_move(dev, slave, RES_MPT, id);
+ return err;
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ break;
+ }
+ return err;
+}
+
+static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int cqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ err = __mlx4_cq_alloc_icm(dev, &cqn);
+ if (err)
+ break;
+
+ err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+ if (err) {
+ __mlx4_cq_free_icm(dev, cqn);
+ break;
+ }
+
+ set_param_l(out_param, cqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int srqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ err = __mlx4_srq_alloc_icm(dev, &srqn);
+ if (err)
+ break;
+
+ err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+ if (err) {
+ __mlx4_srq_free_icm(dev, srqn);
+ break;
+ }
+
+ set_param_l(out_param, srqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct mac_res *res;
+
+ res = kzalloc(sizeof *res, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->mac = mac;
+ res->port = (u8) port;
+ list_add_tail(&res->list,
+ &tracker->slave_list[slave].res_list[RES_MAC]);
+ return 0;
+}
+
+static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->mac == mac && res->port == (u8) port) {
+ list_del(&res->list);
+ kfree(res);
+ break;
+ }
+ }
+}
+
+static void rem_slave_macs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ list_del(&res->list);
+ __mlx4_unregister_mac(dev, res->port, res->mac);
+ kfree(res);
+ }
+}
+
+static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int port;
+ u64 mac;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ port = get_param_l(out_param);
+ mac = in_param;
+
+ err = __mlx4_register_mac(dev, port, mac);
+ if (err >= 0) {
+ set_param_l(out_param, err);
+ err = 0;
+ }
+
+ if (!err) {
+ err = mac_add_to_slave(dev, slave, mac, port);
+ if (err)
+ __mlx4_unregister_mac(dev, port, mac);
+ }
+ return err;
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ return 0;
+}
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int alop = vhcr->op_modifier;
+
+ switch (vhcr->in_modifier) {
+ case RES_QP:
+ err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MTT:
+ err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MPT:
+ err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_CQ:
+ err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_SRQ:
+ err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MAC:
+ err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_VLAN:
+ err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param)
+{
+ int err;
+ int count;
+ int base;
+ int qpn;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ base = get_param_l(&in_param) & 0x7fffff;
+ count = get_param_h(&in_param);
+ err = rem_res_range(dev, slave, base, count, RES_QP, 0);
+ if (err)
+ break;
+ __mlx4_qp_release_range(dev, base, count);
+ break;
+ case RES_OP_MAP_ICM:
+ qpn = get_param_l(&in_param) & 0x7fffff;
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
+ NULL, 0);
+ if (err)
+ return err;
+
+ if (!valid_reserved(dev, slave, qpn))
+ __mlx4_qp_free_icm(dev, qpn);
+
+ res_end_move(dev, slave, RES_QP, qpn);
+
+ if (valid_reserved(dev, slave, qpn))
+ err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int base;
+ int order;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ base = get_param_l(&in_param);
+ order = get_param_h(&in_param);
+ err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
+ if (!err)
+ __mlx4_free_mtt_range(dev, base, order);
+ return err;
+}
+
+static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param)
+{
+ int err = -EINVAL;
+ int index;
+ int id;
+ struct res_mpt *mpt;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ break;
+ index = mpt->key;
+ put_res(dev, slave, id, RES_MPT);
+
+ err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
+ if (err)
+ break;
+ __mlx4_mr_release(dev, index);
+ break;
+ case RES_OP_MAP_ICM:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_RESERVED, &mpt);
+ if (err)
+ return err;
+
+ __mlx4_mr_free_icm(dev, mpt->key);
+ res_end_move(dev, slave, RES_MPT, id);
+ return err;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int cqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ cqn = get_param_l(&in_param);
+ err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+ if (err)
+ break;
+
+ __mlx4_cq_free_icm(dev, cqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int srqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ srqn = get_param_l(&in_param);
+ err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+ if (err)
+ break;
+
+ __mlx4_srq_free_icm(dev, srqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int port;
+ int err = 0;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ port = get_param_l(out_param);
+ mac_del_from_slave(dev, slave, in_param, port);
+ __mlx4_unregister_mac(dev, port, in_param);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ return 0;
+}
+
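+/* FREE_RES dispatcher: the mirror of ALLOC_RES; each handler drops the
+ * tracker entry and releases the underlying resource.
+ */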
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = -EINVAL;
+ int alop = vhcr->op_modifier;
+
+ switch (vhcr->in_modifier) {
+ case RES_QP:
+ err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param);
+ break;
+
+ case RES_MTT:
+ err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MPT:
+ err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param);
+ break;
+
+ case RES_CQ:
+ err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_SRQ:
+ err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MAC:
+ err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_VLAN:
+ err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ default:
+ break;
+ }
+ return err;
+}
+
+/* ugly but other choices are uglier */
+static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
+{
+ return (be32_to_cpu(mpt->flags) >> 9) & 1;
+}
+
+static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
+{
+ return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
+}
+
+static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->mtt_sz);
+}
+
+static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
+{
+ return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
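+/* Number of MTT pages referenced by a QP context.  The SQ always
+ * consumes memory; the RQ consumes none when the QP uses an SRQ, RSS or
+ * XRC, since it then has no receive queue of its own.
+ */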
+static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+{
+ int page_shift = (qpc->log_page_size & 0x3f) + 12;
+ int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
+	int log_sq_stride = qpc->sq_size_stride & 7;
+ int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
+ int log_rq_stride = qpc->rq_size_stride & 7;
+ int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
+ int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
+ int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+ int sq_size;
+ int rq_size;
+ int total_pages;
+ int total_mem;
+ int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+
+	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
+ rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
+ total_mem = sq_size + rq_size;
+ total_pages =
+ roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+ page_shift);
+
+ return total_pages;
+}
+
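+/* A slave may only touch MTT entries inside a range it owns: the range
+ * starts at the tracker resource ID and spans 1 << order entries.
+ */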
+static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
+ int size, struct res_mtt *mtt)
+{
+ int res_start = mtt->com.res_id;
+ int res_size = (1 << mtt->order);
+
+ if (start < res_start || start + size > res_start + res_size)
+ return -EPERM;
+ return 0;
+}
+
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_mpt *mpt;
+ int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
+ int phys;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
+ if (err)
+ return err;
+
+ phys = mr_phys_mpt(inbox->buf);
+ if (!phys) {
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base,
+ mr_get_mtt_size(inbox->buf), mtt);
+ if (err)
+ goto ex_put;
+
+ mpt->mtt = mtt;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ if (!phys) {
+ atomic_inc(&mtt->ref_count);
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_put:
+ if (!phys)
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ return err;
+
+ if (mpt->com.from_state != RES_MPT_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+out:
+ put_res(dev, slave, id, RES_MPT);
+ return err;
+}
+
+static int qp_get_rcqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
+}
+
+static int qp_get_scqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->cqn_send) & 0xffffff;
+}
+
+static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->srqn) & 0x1ffffff;
+}
+
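+/* RST2INIT is where a QP starts referencing other resources: pin its
+ * MTT range, receive/send CQs and optional SRQ before the command is
+ * passed down to the HW.
+ */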
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_mtt *mtt;
+ struct res_qp *qp;
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+ int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
+ int mtt_size = qp_get_mtt_size(qpc);
+ struct res_cq *rcq;
+ struct res_cq *scq;
+ int rcqn = qp_get_rcqn(qpc);
+ int scqn = qp_get_scqn(qpc);
+ u32 srqn = qp_get_srqn(qpc) & 0xffffff;
+ int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
+ struct res_srq *srq;
+ int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
+ if (err)
+ return err;
+ qp->local_qpn = local_qpn;
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+ if (err)
+ goto ex_put_mtt;
+
+ err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
+ if (err)
+ goto ex_put_mtt;
+
+ if (scqn != rcqn) {
+ err = get_res(dev, slave, scqn, RES_CQ, &scq);
+ if (err)
+ goto ex_put_rcq;
+	} else {
+		scq = rcq;
+	}
+
+ if (use_srq) {
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ goto ex_put_scq;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put_srq;
+ atomic_inc(&mtt->ref_count);
+ qp->mtt = mtt;
+ atomic_inc(&rcq->ref_count);
+ qp->rcq = rcq;
+ atomic_inc(&scq->ref_count);
+ qp->scq = scq;
+
+ if (scqn != rcqn)
+ put_res(dev, slave, scqn, RES_CQ);
+
+ if (use_srq) {
+ atomic_inc(&srq->ref_count);
+ put_res(dev, slave, srqn, RES_SRQ);
+ qp->srq = srq;
+ }
+ put_res(dev, slave, rcqn, RES_CQ);
+ put_res(dev, slave, mtt_base, RES_MTT);
+ res_end_move(dev, slave, RES_QP, qpn);
+
+ return 0;
+
+ex_put_srq:
+ if (use_srq)
+ put_res(dev, slave, srqn, RES_SRQ);
+ex_put_scq:
+ if (scqn != rcqn)
+ put_res(dev, slave, scqn, RES_CQ);
+ex_put_rcq:
+ put_res(dev, slave, rcqn, RES_CQ);
+ex_put_mtt:
+ put_res(dev, slave, mtt_base, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_QP, qpn);
+
+ return err;
+}
+
+static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
+{
+ return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
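+/* EQ entries are 32 bytes, so 2^log_eq_size entries span
+ * 2^(log_eq_size + 5) bytes; an EQ occupies at least one page.
+ */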
+static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
+{
+ int log_eq_size = eqc->log_eq_size & 0x1f;
+ int page_shift = (eqc->log_page_size & 0x3f) + 12;
+
+ if (log_eq_size + 5 < page_shift)
+ return 1;
+
+ return 1 << (log_eq_size + 5 - page_shift);
+}
+
+static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
+{
+ return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
+{
+ int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
+ int page_shift = (cqc->log_page_size & 0x3f) + 12;
+
+ if (log_cq_size + 5 < page_shift)
+ return 1;
+
+ return 1 << (log_cq_size + 5 - page_shift);
+}
+
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int eqn = vhcr->in_modifier;
+ int res_id = (slave << 8) | eqn;
+ struct mlx4_eq_context *eqc = inbox->buf;
+ int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
+ int mtt_size = eq_get_mtt_size(eqc);
+ struct res_eq *eq;
+ struct res_mtt *mtt;
+
+ err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+ if (err)
+ return err;
+ err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
+ if (err)
+ goto out_add;
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto out_move;
+
+ err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+ if (err)
+ goto out_put;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_put;
+
+ atomic_inc(&mtt->ref_count);
+ eq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_EQ, res_id);
+ return 0;
+
+out_put:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+ res_abort_move(dev, slave, RES_EQ, res_id);
+out_add:
+ rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+ return err;
+}
+
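+/* Find an MTT reservation of this slave that fully contains the written
+ * range and mark it busy for the duration of the write.
+ */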
+static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
+ int len, struct res_mtt **res)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_mtt *mtt;
+ int err = -EINVAL;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
+ com.list) {
+ if (!check_mtt_range(dev, slave, start, len, mtt)) {
+ *res = mtt;
+ mtt->com.from_state = mtt->com.state;
+ mtt->com.state = RES_MTT_BUSY;
+ err = 0;
+ break;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_mtt mtt;
+ __be64 *page_list = inbox->buf;
+ u64 *pg_list = (u64 *)page_list;
+ int i;
+ struct res_mtt *rmtt = NULL;
+ int start = be64_to_cpu(page_list[0]);
+ int npages = vhcr->in_modifier;
+ int err;
+
+ err = get_containing_mtt(dev, slave, start, npages, &rmtt);
+ if (err)
+ return err;
+
+ /* Call the SW implementation of write_mtt:
+ * - Prepare a dummy mtt struct
+	 * - Translate inbox contents to simple addresses in host endianness */
+	mtt.offset = 0; /* TBD: offset handling is broken, but nothing here
+			   uses it, so leave it zeroed for now */
+ mtt.order = 0;
+ mtt.page_shift = 0;
+ for (i = 0; i < npages; ++i)
+ pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
+
+ err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
+ ((u64 *)page_list + 2));
+
+ if (rmtt)
+ put_res(dev, slave, rmtt->com.res_id, RES_MTT);
+
+ return err;
+}
+
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int eqn = vhcr->in_modifier;
+ int res_id = eqn | (slave << 8);
+ struct res_eq *eq;
+ int err;
+
+ err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
+ if (err)
+ return err;
+
+ err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
+ if (err)
+ goto ex_abort;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ atomic_dec(&eq->mtt->ref_count);
+ put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_EQ, res_id);
+ rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+
+ return 0;
+
+ex_put:
+ put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_EQ, res_id);
+
+ return err;
+}
+
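+/* Deliver an event to a slave: copy the EQE into a mailbox and let the
+ * firmware (GEN_EQE) post it on the EQ the slave registered for this
+ * event type.
+ */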
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq_info *event_eq;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_modifier = 0;
+ int err;
+ int res_id;
+ struct res_eq *req;
+
+ if (!priv->mfunc.master.slave_state)
+ return -EINVAL;
+
+ event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
+
+ /* Create the event only if the slave is registered */
+ if (event_eq->eqn < 0)
+ return 0;
+
+ mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ res_id = (slave << 8) | event_eq->eqn;
+ err = get_res(dev, slave, res_id, RES_EQ, &req);
+ if (err)
+ goto unlock;
+
+ if (req->com.from_state != RES_EQ_HW) {
+ err = -EINVAL;
+ goto put;
+ }
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto put;
+ }
+
+ if (eqe->type == MLX4_EVENT_TYPE_CMD) {
+ ++event_eq->token;
+ eqe->event.cmd.token = cpu_to_be16(event_eq->token);
+ }
+
+ memcpy(mailbox->buf, (u8 *) eqe, 28);
+
+ in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+
+ err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
+ MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ put_res(dev, slave, res_id, RES_EQ);
+ mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+
+put:
+ put_res(dev, slave, res_id, RES_EQ);
+
+unlock:
+ mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ return err;
+}
+
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int eqn = vhcr->in_modifier;
+ int res_id = eqn | (slave << 8);
+ struct res_eq *eq;
+ int err;
+
+ err = get_res(dev, slave, res_id, RES_EQ, &eq);
+ if (err)
+ return err;
+
+ if (eq->com.from_state != RES_EQ_HW) {
+ err = -EINVAL;
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ex_put:
+ put_res(dev, slave, res_id, RES_EQ);
+ return err;
+}
+
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int cqn = vhcr->in_modifier;
+ struct mlx4_cq_context *cqc = inbox->buf;
+ int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+ struct res_cq *cq;
+ struct res_mtt *mtt;
+
+ err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
+ if (err)
+ return err;
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto out_move;
+ err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+ if (err)
+ goto out_put;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_put;
+ atomic_inc(&mtt->ref_count);
+ cq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_CQ, cqn);
+ return 0;
+
+out_put:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+ res_abort_move(dev, slave, RES_CQ, cqn);
+ return err;
+}
+
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+
+ err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_move;
+ atomic_dec(&cq->mtt->ref_count);
+ res_end_move(dev, slave, RES_CQ, cqn);
+ return 0;
+
+out_move:
+ res_abort_move(dev, slave, RES_CQ, cqn);
+ return err;
+}
+
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+ int err;
+
+ err = get_res(dev, slave, cqn, RES_CQ, &cq);
+ if (err)
+ return err;
+
+ if (cq->com.from_state != RES_CQ_HW)
+ goto ex_put;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+ put_res(dev, slave, cqn, RES_CQ);
+
+ return err;
+}
+
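+/* CQ resize: verify the new MTT range belongs to the slave, execute the
+ * command, then move the CQ's reference from the old range to the new
+ * one.
+ */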
+static int handle_resize(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd,
+ struct res_cq *cq)
+{
+ int err;
+ struct res_mtt *orig_mtt;
+ struct res_mtt *mtt;
+ struct mlx4_cq_context *cqc = inbox->buf;
+ int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+
+ err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
+ if (err)
+ return err;
+
+ if (orig_mtt != cq->mtt) {
+ err = -EINVAL;
+ goto ex_put;
+ }
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_put;
+
+ err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+ if (err)
+ goto ex_put1;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put1;
+ atomic_dec(&orig_mtt->ref_count);
+ put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+ atomic_inc(&mtt->ref_count);
+ cq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ return 0;
+
+ex_put1:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_put:
+ put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+
+ return err;
+}
+
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+ int err;
+
+ err = get_res(dev, slave, cqn, RES_CQ, &cq);
+ if (err)
+ return err;
+
+ if (cq->com.from_state != RES_CQ_HW)
+ goto ex_put;
+
+ if (vhcr->op_modifier == 0) {
+ err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
+ if (err)
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+ put_res(dev, slave, cqn, RES_CQ);
+
+ return err;
+}
+
+static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
+{
+ int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
+ int log_rq_stride = srqc->logstride & 7;
+ int page_shift = (srqc->log_page_size & 0x3f) + 12;
+
+ if (log_srq_size + log_rq_stride + 4 < page_shift)
+ return 1;
+
+ return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
+}
+
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_srq *srq;
+ struct mlx4_srq_context *srqc = inbox->buf;
+ int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
+
+ if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
+ return -EINVAL;
+
+ err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
+ if (err)
+ return err;
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+ err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
+ mtt);
+ if (err)
+ goto ex_put_mtt;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put_mtt;
+
+ atomic_inc(&mtt->ref_count);
+ srq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_SRQ, srqn);
+ return 0;
+
+ex_put_mtt:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_SRQ, srqn);
+
+ return err;
+}
+
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+ atomic_dec(&srq->mtt->ref_count);
+ if (srq->cq)
+ atomic_dec(&srq->cq->ref_count);
+ res_end_move(dev, slave, RES_SRQ, srqn);
+
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_SRQ, srqn);
+
+ return err;
+}
+
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ return err;
+ if (srq->com.from_state != RES_SRQ_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, srqn, RES_SRQ);
+ return err;
+}
+
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ return err;
+
+ if (srq->com.from_state != RES_SRQ_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, srqn, RES_SRQ);
+ return err;
+}
+
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_qp *qp;
+
+ err = get_res(dev, slave, qpn, RES_QP, &qp);
+ if (err)
+ return err;
+ if (qp->com.from_state != RES_QP_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
+
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+
+ update_ud_gid(dev, qpc, (u8)slave);
+
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_qp *qp;
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ atomic_dec(&qp->mtt->ref_count);
+ atomic_dec(&qp->rcq->ref_count);
+ atomic_dec(&qp->scq->ref_count);
+ if (qp->srq)
+ atomic_dec(&qp->srq->ref_count);
+ res_end_move(dev, slave, RES_QP, qpn);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_QP, qpn);
+
+ return err;
+}
+
+static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
+ struct res_qp *rqp, u8 *gid)
+{
+ struct res_gid *res;
+
+ list_for_each_entry(res, &rqp->mcg_list, list) {
+ if (!memcmp(res->gid, gid, 16))
+ return res;
+ }
+ return NULL;
+}
+
+static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+ u8 *gid, enum mlx4_protocol prot,
+ enum mlx4_steer_type steer)
+{
+ struct res_gid *res;
+ int err;
+
+ res = kzalloc(sizeof *res, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ spin_lock_irq(&rqp->mcg_spl);
+ if (find_gid(dev, slave, rqp, gid)) {
+ kfree(res);
+ err = -EEXIST;
+ } else {
+ memcpy(res->gid, gid, 16);
+ res->prot = prot;
+ res->steer = steer;
+ list_add_tail(&res->list, &rqp->mcg_list);
+ err = 0;
+ }
+ spin_unlock_irq(&rqp->mcg_spl);
+
+ return err;
+}
+
+static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+ u8 *gid, enum mlx4_protocol prot,
+ enum mlx4_steer_type steer)
+{
+ struct res_gid *res;
+ int err;
+
+ spin_lock_irq(&rqp->mcg_spl);
+ res = find_gid(dev, slave, rqp, gid);
+ if (!res || res->prot != prot || res->steer != steer)
+ err = -EINVAL;
+ else {
+ list_del(&res->list);
+ kfree(res);
+ err = 0;
+ }
+ spin_unlock_irq(&rqp->mcg_spl);
+
+ return err;
+}
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp qp; /* dummy for calling attach/detach */
+ u8 *gid = inbox->buf;
+ enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
+ int err, err1;
+ int qpn;
+ struct res_qp *rqp;
+ int attach = vhcr->op_modifier;
+ int block_loopback = vhcr->in_modifier >> 31;
+ u8 steer_type_mask = 2;
+ enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
+
+ qpn = vhcr->in_modifier & 0xffffff;
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
+ if (err)
+ return err;
+
+ qp.qpn = qpn;
+ if (attach) {
+ err = add_mcg_res(dev, slave, rqp, gid, prot, type);
+ if (err)
+ goto ex_put;
+
+ err = mlx4_qp_attach_common(dev, &qp, gid,
+ block_loopback, prot, type);
+ if (err)
+ goto ex_rem;
+ } else {
+ err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
+ if (err)
+ goto ex_put;
+ err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
+ }
+
+ put_res(dev, slave, qpn, RES_QP);
+ return 0;
+
+ex_rem:
+ /* ignore error return below, already in error */
+ err1 = rem_mcg_res(dev, slave, rqp, gid, prot, type);
+ex_put:
+ put_res(dev, slave, qpn, RES_QP);
+
+ return err;
+}
+
+enum {
+ BUSY_MAX_RETRIES = 10
+};
+
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier & 0xffff;
+
+ err = get_res(dev, slave, index, RES_COUNTER, NULL);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ put_res(dev, slave, index, RES_COUNTER);
+ return err;
+}
+
+static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
+{
+ struct res_gid *rgid;
+ struct res_gid *tmp;
+ int err;
+ struct mlx4_qp qp; /* dummy for calling attach/detach */
+
+ list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
+ qp.qpn = rqp->local_qpn;
+		err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
+					    rgid->steer);
+		if (err)
+			mlx4_dbg(dev, "detach_qp: failed to detach qpn 0x%x\n",
+				 qp.qpn);
+ list_del(&rgid->list);
+ kfree(rgid);
+ }
+}
+
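+/* Claim all of a slave's resources of the given type: anything not
+ * already busy is marked busy and flagged for removal; the return value
+ * counts resources still held busy elsewhere.
+ */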
+static int _move_all_busy(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int print)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
+ struct res_common *r;
+ struct res_common *tmp;
+ int busy;
+
+ busy = 0;
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(r, tmp, rlist, list) {
+ if (r->owner == slave) {
+ if (!r->removing) {
+ if (r->state == RES_ANY_BUSY) {
+ if (print)
+ mlx4_dbg(dev,
+ "%s id 0x%x is busy\n",
+ ResourceType(type),
+ r->res_id);
+ ++busy;
+ } else {
+ r->from_state = r->state;
+ r->state = RES_ANY_BUSY;
+ r->removing = 1;
+ }
+ }
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return busy;
+}
+
+static int move_all_busy(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type)
+{
+ unsigned long begin;
+ int busy;
+
+ begin = jiffies;
+ do {
+ busy = _move_all_busy(dev, slave, type, 0);
+ if (time_after(jiffies, begin + 5 * HZ))
+ break;
+ if (busy)
+ cond_resched();
+ } while (busy);
+
+ if (busy)
+ busy = _move_all_busy(dev, slave, type, 1);
+
+ return busy;
+}
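+
+/* Tear down a slave's QPs by walking each one back through its state
+ * machine (HW -> MAPPED -> RESERVED), undoing references and ICM
+ * mappings at every step.  The rem_slave_*() helpers below all follow
+ * this pattern.
+ */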
+static void rem_slave_qps(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *qp_list =
+ &tracker->slave_list[slave].res_list[RES_QP];
+ struct res_qp *qp;
+ struct res_qp *tmp;
+ int state;
+ u64 in_param;
+ int qpn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_QP);
+ if (err)
+		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
+			  "for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (qp->com.owner == slave) {
+ qpn = qp->com.res_id;
+ detach_qp(dev, slave, qp);
+ state = qp->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_QP_RESERVED:
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_QP],
+ qp->com.res_id);
+ list_del(&qp->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(qp);
+ state = 0;
+ break;
+ case RES_QP_MAPPED:
+ if (!valid_reserved(dev, slave, qpn))
+ __mlx4_qp_free_icm(dev, qpn);
+ state = RES_QP_RESERVED;
+ break;
+ case RES_QP_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param,
+ qp->local_qpn, 2,
+ MLX4_CMD_2RST_QP,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_qps: failed"
+ " to move slave %d qpn %d to"
+ " reset\n", slave,
+ qp->local_qpn);
+ atomic_dec(&qp->rcq->ref_count);
+ atomic_dec(&qp->scq->ref_count);
+ atomic_dec(&qp->mtt->ref_count);
+ if (qp->srq)
+ atomic_dec(&qp->srq->ref_count);
+ state = RES_QP_MAPPED;
+ break;
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *srq_list =
+ &tracker->slave_list[slave].res_list[RES_SRQ];
+ struct res_srq *srq;
+ struct res_srq *tmp;
+ int state;
+ u64 in_param;
+ int srqn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_SRQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (srq->com.owner == slave) {
+ srqn = srq->com.res_id;
+ state = srq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_SRQ_ALLOCATED:
+ __mlx4_srq_free_icm(dev, srqn);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_SRQ],
+ srqn);
+ list_del(&srq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(srq);
+ state = 0;
+ break;
+
+ case RES_SRQ_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, srqn, 1,
+ MLX4_CMD_HW2SW_SRQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_srqs: failed"
+ " to move slave %d srq %d to"
+ " SW ownership\n",
+ slave, srqn);
+
+ atomic_dec(&srq->mtt->ref_count);
+ if (srq->cq)
+ atomic_dec(&srq->cq->ref_count);
+ state = RES_SRQ_ALLOCATED;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *cq_list =
+ &tracker->slave_list[slave].res_list[RES_CQ];
+ struct res_cq *cq;
+ struct res_cq *tmp;
+ int state;
+ u64 in_param;
+ int cqn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_CQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
+ cqn = cq->com.res_id;
+ state = cq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_CQ_ALLOCATED:
+ __mlx4_cq_free_icm(dev, cqn);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_CQ],
+ cqn);
+ list_del(&cq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(cq);
+ state = 0;
+ break;
+
+ case RES_CQ_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, cqn, 1,
+ MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_cqs: failed"
+ " to move slave %d cq %d to"
+ " SW ownership\n",
+ slave, cqn);
+ atomic_dec(&cq->mtt->ref_count);
+ state = RES_CQ_ALLOCATED;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mpt_list =
+ &tracker->slave_list[slave].res_list[RES_MPT];
+ struct res_mpt *mpt;
+ struct res_mpt *tmp;
+ int state;
+ u64 in_param;
+ int mptn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_MPT);
+ if (err)
+ mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (mpt->com.owner == slave) {
+ mptn = mpt->com.res_id;
+ state = mpt->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_MPT_RESERVED:
+ __mlx4_mr_release(dev, mpt->key);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_MPT],
+ mptn);
+ list_del(&mpt->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(mpt);
+ state = 0;
+ break;
+
+ case RES_MPT_MAPPED:
+ __mlx4_mr_free_icm(dev, mpt->key);
+ state = RES_MPT_RESERVED;
+ break;
+
+ case RES_MPT_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, mptn, 0,
+ MLX4_CMD_HW2SW_MPT,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_mrs: failed"
+ " to move slave %d mpt %d to"
+ " SW ownership\n",
+ slave, mptn);
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+ state = RES_MPT_MAPPED;
+ break;
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *mtt_list =
+ &tracker->slave_list[slave].res_list[RES_MTT];
+ struct res_mtt *mtt;
+ struct res_mtt *tmp;
+ int state;
+ int base;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_MTT);
+ if (err)
+ mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (mtt->com.owner == slave) {
+ base = mtt->com.res_id;
+ state = mtt->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_MTT_ALLOCATED:
+ __mlx4_free_mtt_range(dev, base,
+ mtt->order);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_MTT],
+ base);
+ list_del(&mtt->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(mtt);
+ state = 0;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *eq_list =
+ &tracker->slave_list[slave].res_list[RES_EQ];
+ struct res_eq *eq;
+ struct res_eq *tmp;
+ int err;
+ int state;
+ int eqn;
+ struct mlx4_cmd_mailbox *mailbox;
+
+ err = move_all_busy(dev, slave, RES_EQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (eq->com.owner == slave) {
+ eqn = eq->com.res_id;
+ state = eq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_EQ_RESERVED:
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_EQ],
+ eqn);
+ list_del(&eq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(eq);
+ state = 0;
+ break;
+
+ case RES_EQ_HW:
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ cond_resched();
+ continue;
+ }
+ err = mlx4_cmd_box(dev, slave, 0,
+ eqn & 0xff, 0,
+ MLX4_CMD_HW2SW_EQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+				if (err)
+					mlx4_dbg(dev, "rem_slave_eqs: failed"
+						 " to move slave %d eq %d to"
+						 " SW ownership\n",
+						 slave, eqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (!err) {
+ atomic_dec(&eq->mtt->ref_count);
+ state = RES_EQ_RESERVED;
+ }
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+	/* VLANs are not tracked yet, so only MACs are removed here */
+ rem_slave_macs(dev, slave);
+ rem_slave_qps(dev, slave);
+ rem_slave_srqs(dev, slave);
+ rem_slave_cqs(dev, slave);
+ rem_slave_mrs(dev, slave);
+ rem_slave_eqs(dev, slave);
+ rem_slave_mtts(dev, slave);
+ mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index e2337a7411d..80249829352 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -45,7 +45,8 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
int err = 0;
err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
- MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err) {
mlx4_err(dev, "Sense command failed for port: %d\n", port);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 9cbf3fce014..feda6c00829 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,6 +31,8 @@
* SOFTWARE.
*/
+#include <linux/init.h>
+
#include <linux/mlx4/cmd.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -38,26 +40,6 @@
#include "mlx4.h"
#include "icm.h"
-struct mlx4_srq_context {
- __be32 state_logsize_srqn;
- u8 logstride;
- u8 reserved1;
- __be16 xrcd;
- __be32 pg_offset_cqn;
- u32 reserved2;
- u8 log_page_size;
- u8 reserved3[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- __be32 pd;
- __be16 limit_watermark;
- __be16 wqe_cnt;
- u16 reserved4;
- __be16 wqe_counter;
- u32 reserved5;
- __be64 db_rec_addr;
-};
-
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
@@ -85,8 +67,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
- return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma, srq_num, 0,
+ MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -94,48 +77,109 @@ static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
- struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
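+/* Low-level SRQ ICM allocation, split out so the master can also run it
+ * on behalf of slaves through the ALLOC_RES wrapper.
+ */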
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_srq_context *srq_context;
- u64 mtt_addr;
int err;
- srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
- if (srq->srqn == -1)
+
+ *srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+ if (*srqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+ err = mlx4_table_get(dev, &srq_table->table, *srqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
if (err)
goto err_put;
+ return 0;
+
+err_put:
+ mlx4_table_put(dev, &srq_table->table, *srqn);
+
+err_out:
+ mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+ return err;
+}
+
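+/* Slaves must not touch ICM directly; on a multi-function device the
+ * request is forwarded to the master as an ALLOC_RES command.
+ */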
+static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ *srqn = get_param_l(&out_param);
+
+ return err;
+ }
+ return __mlx4_srq_alloc_icm(dev, srqn);
+}
+
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
+ mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
+ mlx4_table_put(dev, &srq_table->table, srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srqn);
+}
+
+static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, srqn);
+ if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+			mlx4_warn(dev, "Failed freeing srq:%d\n", srqn);
+ return;
+ }
+ __mlx4_srq_free_icm(dev, srqn);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+ struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_srq_context *srq_context;
+ u64 mtt_addr;
+ int err;
+
+ err = mlx4_srq_alloc_icm(dev, &srq->srqn);
+ if (err)
+ return err;
spin_lock_irq(&srq_table->lock);
err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
spin_unlock_irq(&srq_table->lock);
if (err)
- goto err_cmpt_put;
+ goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -174,15 +218,8 @@ err_radix:
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
-err_cmpt_put:
- mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
- mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
- mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
+err_icm:
+ mlx4_srq_free_icm(dev, srq->srqn);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
@@ -204,8 +241,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
complete(&srq->free);
wait_for_completion(&srq->free);
- mlx4_table_put(dev, &srq_table->table, srq->srqn);
- mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+ mlx4_srq_free_icm(dev, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);
@@ -245,6 +281,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
spin_lock_init(&srq_table->lock);
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -256,5 +294,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}