author     Moshe Lazer <moshel@mellanox.com>          2013-08-14 17:46:48 +0300
committer  David S. Miller <davem@davemloft.net>      2013-08-15 15:42:57 -0700
commit     0a324f3189ed9c78b1aaf48d88e93cb18643c655 (patch)
tree       74ce14e88537117866a5327e7afb2d4ba3617990 /drivers
parent     15718ea0d844e4816dbd95d57a8a0e3e264ba90e (diff)
net/mlx5_core: Support MANAGE_PAGES and QUERY_PAGES firmware command changes
In the previous QUERY_PAGES command version we used one command to get the required amount of boot, init and post-init pages. The new version uses the op_mod field to specify whether the query is for the required amount of boot, init or post-init pages. In addition, the output field size for the required amount of pages increased from 16 to 32 bits.

In the MANAGE_PAGES command, the input_num_entries and output_num_entries fields grew from 16 to 32 bits and the PAS tables offset changed to 0x10. In the pages request event, the num_pages field also changed to 32 bits. In the HCA-capabilities layout, the size and location of the max_qp_mcg field changed to support 24 bits.

This patch isn't compatible with firmware versions < 5; however, the first GA firmware we will publish will not support previous versions, so this should be OK.

Signed-off-by: Moshe Lazer <moshel@mellanox.com>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
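To make the layout change concrete, here is a sketch of the reworked MANAGE_PAGES inbox with byte offsets annotated, assuming the 8-byte mlx5_inbox_hdr (opcode, reserved bytes, op_mod) used elsewhere in the driver; the struct itself comes from the pagealloc.c hunk below, the offsets are illustrative. With the old pair of 16-bit counters collapsed into a single 32-bit num_entries, the PAS (physical address structure) table starts at offset 0x10.

/* Reworked MANAGE_PAGES input layout (offsets illustrative):
 *
 *   0x00  struct mlx5_inbox_hdr hdr;   opcode / reserved / op_mod, 8 bytes
 *   0x08  __be16 rsvd;
 *   0x0a  __be16 func_id;
 *   0x0c  __be32 num_entries;          widened from 16 to 32 bits
 *   0x10  __be64 pas[0];               PAS table now begins at 0x10
 */
struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd;
	__be16			func_id;
	__be32			num_entries;
	__be64			pas[0];
};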
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/cmd.c         2
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/eq.c          2
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/fw.c          2
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c  58
4 files changed, 29 insertions, 35 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c571de85d0f..5472cbd3402 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
#include "mlx5_core.h"
enum {
- CMD_IF_REV = 4,
+ CMD_IF_REV = 5,
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c02cbcfd0fb..443cc4d7b02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
case MLX5_EVENT_TYPE_PAGE_REQUEST:
{
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
- s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+ s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages);
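The request count stays signed after the widening (s32 rather than u32) presumably because the same event carries both directions of page traffic: a positive count asks the driver to supply pages, a negative one asks it to return them. A hedged sketch of such a dispatcher (the give_pages/reclaim_pages argument lists here are abbreviated, not the driver's exact signatures):

/* Illustrative only: dispatch on the sign of the 32-bit page count */
static void handle_req_pages(struct mlx5_core_dev *dev, u16 func_id, s32 npages)
{
	if (npages > 0)
		give_pages(dev, func_id, npages, 1);        /* firmware needs more pages */
	else if (npages < 0)
		reclaim_pages(dev, func_id, -npages, NULL); /* firmware is returning pages */
}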
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 72a5222447f..f012658b6a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
caps->log_max_mcg = out->hca_cap.log_max_mcg;
- caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+ caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
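The widened max_qp_mcg now occupies the low 24 bits of a 32-bit big-endian word, so the read byte-swaps the whole word and masks off the top byte. A small worked example with a hypothetical wire value:

/* Hypothetical raw capability word: the top byte belongs to another field/reserved */
__be32 raw = cpu_to_be32(0xab123456);
u32 max_qp_mcg = be32_to_cpu(raw) & 0xffffff;   /* yields 0x123456 */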
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 4a3e137931a..3a2408d4482 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -43,10 +43,16 @@ enum {
MLX5_PAGES_TAKE = 2
};
+enum {
+ MLX5_BOOT_PAGES = 1,
+ MLX5_INIT_PAGES = 2,
+ MLX5_POST_INIT_PAGES = 3
+};
+
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u32 func_id;
- s16 npages;
+ s32 npages;
struct work_struct work;
};
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
struct mlx5_query_pages_outbox {
struct mlx5_outbox_hdr hdr;
- __be16 num_boot_pages;
+ __be16 rsvd;
__be16 func_id;
- __be16 init_pages;
- __be16 num_pages;
+ __be32 num_pages;
};
struct mlx5_manage_pages_inbox {
struct mlx5_inbox_hdr hdr;
- __be16 rsvd0;
+ __be16 rsvd;
__be16 func_id;
- __be16 rsvd1;
- __be16 num_entries;
- u8 rsvd2[16];
+ __be32 num_entries;
__be64 pas[0];
};
struct mlx5_manage_pages_outbox {
struct mlx5_outbox_hdr hdr;
- u8 rsvd0[2];
- __be16 num_entries;
- u8 rsvd1[20];
+ __be32 num_entries;
+ u8 rsvd[4];
__be64 pas[0];
};
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
}
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
- s16 *pages, s16 *init_pages, u16 *boot_pages)
+ s32 *npages, int boot)
{
struct mlx5_query_pages_inbox in;
struct mlx5_query_pages_outbox out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+ in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
- if (pages)
- *pages = be16_to_cpu(out.num_pages);
-
- if (init_pages)
- *init_pages = be16_to_cpu(out.init_pages);
-
- if (boot_pages)
- *boot_pages = be16_to_cpu(out.num_boot_pages);
-
+ *npages = be32_to_cpu(out.num_pages);
*func_id = be16_to_cpu(out.func_id);
return err;
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
in->func_id = cpu_to_be16(func_id);
- in->num_entries = cpu_to_be16(npages);
+ in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
mlx5_core_dbg(dev, "err %d\n", err);
if (err) {
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
in.func_id = cpu_to_be16(func_id);
- in.num_entries = cpu_to_be16(npages);
+ in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) {
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
goto out_free;
}
- num_claimed = be16_to_cpu(out->num_entries);
+ num_claimed = be32_to_cpu(out->num_entries);
if (nclaimed)
*nclaimed = num_claimed;
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
}
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s16 npages)
+ s32 npages)
{
struct mlx5_pages_req *req;
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
- u16 uninitialized_var(boot_pages);
- s16 uninitialized_var(init_pages);
u16 uninitialized_var(func_id);
+ s32 uninitialized_var(npages);
int err;
- err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
- &boot_pages);
+ err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
if (err)
return err;
+ mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+ npages, boot ? "boot" : "init", func_id);
- mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
- init_pages, boot_pages, func_id);
- return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
+ return give_pages(dev, func_id, npages, 0);
}
static int optimal_reclaimed_pages(void)
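With the per-stage query, mlx5_satisfy_startup_pages() is now invoked once per stage, the boot flag selecting the op_mod used by mlx5_cmd_query_pages(). An illustrative caller (the placement within the probe path is a sketch, not the exact driver flow):

int err;

/* Boot pages: queried with op_mod = MLX5_BOOT_PAGES and handed to firmware early */
err = mlx5_satisfy_startup_pages(dev, 1);
if (err)
	return err;

/* ... HCA initialization continues ... */

/* Init pages: queried with op_mod = MLX5_INIT_PAGES */
err = mlx5_satisfy_startup_pages(dev, 0);
if (err)
	return err;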