From 9603b61de1eee92977d74ff42541be20c0c5b1a7 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 28 Jul 2014 23:30:22 +0300 Subject: mlx5: Move pci device handling from mlx5_ib to mlx5_core In preparation for a new mlx5 device which is VPI (i.e., ports can be either IB or ETH), move the pci device functionality from mlx5_ib to mlx5_core. This involves the following changes: 1. Move mlx5_core_dev struct out of mlx5_ib_dev. mlx5_core_dev is now an independent structure maintained by mlx5_core. mlx5_ib_dev now has a pointer to that struct. This requires changing a lot of places where the core_dev struct was accessed via mlx5_ib_dev (now, this needs to be a pointer dereference). 2. All PCI initializations are now done in mlx5_core. Thus, it is now mlx5_core which does pci_register_device (and not mlx5_ib, as was previously). 3. mlx5_ib now registers itself with mlx5_core as an "interface" driver. This is very similar to the mechanism employed for the mlx4 (ConnectX) driver. Once the HCA is initialized (by mlx5_core), it invokes the interface drivers to do their initializations. 4. There is a new event handler which the core registers: mlx5_core_event(). This event handler invokes the event handlers registered by the interfaces. Based on a patch by Eli Cohen Signed-off-by: Jack Morgenstein Signed-off-by: Eli Cohen Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/main.c | 281 ++++++++++++-------------------------- 1 file changed, 91 insertions(+), 190 deletions(-) (limited to 'drivers/infiniband/hw/mlx5/main.c') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 364d4b6937f..f2cfd363a70 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -54,96 +54,17 @@ MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRIVER_VERSION); -static int prof_sel = 2; -module_param_named(prof_sel, prof_sel, int, 0444); -MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); +static int deprecated_prof_sel = 2; +module_param_named(prof_sel, deprecated_prof_sel, int, 0444); +MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. 
Moved to module mlx5_core"); static char mlx5_version[] = DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; -static struct mlx5_profile profile[] = { - [0] = { - .mask = 0, - }, - [1] = { - .mask = MLX5_PROF_MASK_QP_SIZE, - .log_max_qp = 12, - }, - [2] = { - .mask = MLX5_PROF_MASK_QP_SIZE | - MLX5_PROF_MASK_MR_CACHE, - .log_max_qp = 17, - .mr_cache[0] = { - .size = 500, - .limit = 250 - }, - .mr_cache[1] = { - .size = 500, - .limit = 250 - }, - .mr_cache[2] = { - .size = 500, - .limit = 250 - }, - .mr_cache[3] = { - .size = 500, - .limit = 250 - }, - .mr_cache[4] = { - .size = 500, - .limit = 250 - }, - .mr_cache[5] = { - .size = 500, - .limit = 250 - }, - .mr_cache[6] = { - .size = 500, - .limit = 250 - }, - .mr_cache[7] = { - .size = 500, - .limit = 250 - }, - .mr_cache[8] = { - .size = 500, - .limit = 250 - }, - .mr_cache[9] = { - .size = 500, - .limit = 250 - }, - .mr_cache[10] = { - .size = 500, - .limit = 250 - }, - .mr_cache[11] = { - .size = 500, - .limit = 250 - }, - .mr_cache[12] = { - .size = 64, - .limit = 32 - }, - .mr_cache[13] = { - .size = 32, - .limit = 16 - }, - .mr_cache[14] = { - .size = 16, - .limit = 8 - }, - .mr_cache[15] = { - .size = 8, - .limit = 4 - }, - }, -}; - int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn) { - struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; + struct mlx5_eq_table *table = &dev->mdev->priv.eq_table; struct mlx5_eq *eq, *n; int err = -ENOENT; @@ -163,7 +84,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn) static int alloc_comp_eqs(struct mlx5_ib_dev *dev) { - struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; + struct mlx5_eq_table *table = &dev->mdev->priv.eq_table; char name[MLX5_MAX_EQ_NAME]; struct mlx5_eq *eq, *n; int ncomp_vec; @@ -182,9 +103,9 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev) } snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); - err = mlx5_create_map_eq(&dev->mdev, eq, + err = mlx5_create_map_eq(dev->mdev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, - name, &dev->mdev.priv.uuari.uars[0]); + name, &dev->mdev->priv.uuari.uars[0]); if (err) { kfree(eq); goto clean; @@ -204,7 +125,7 @@ clean: list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { list_del(&eq->list); spin_unlock(&table->lock); - if (mlx5_destroy_unmap_eq(&dev->mdev, eq)) + if (mlx5_destroy_unmap_eq(dev->mdev, eq)) mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); kfree(eq); spin_lock(&table->lock); @@ -215,14 +136,14 @@ clean: static void free_comp_eqs(struct mlx5_ib_dev *dev) { - struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; + struct mlx5_eq_table *table = &dev->mdev->priv.eq_table; struct mlx5_eq *eq, *n; spin_lock(&table->lock); list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { list_del(&eq->list); spin_unlock(&table->lock); - if (mlx5_destroy_unmap_eq(&dev->mdev, eq)) + if (mlx5_destroy_unmap_eq(dev->mdev, eq)) mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); kfree(eq); spin_lock(&table->lock); @@ -255,14 +176,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, memset(props, 0, sizeof(*props)); - props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) | - (fw_rev_min(&dev->mdev) << 16) | - fw_rev_sub(&dev->mdev); + props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) | + (fw_rev_min(dev->mdev) << 16) | + fw_rev_sub(dev->mdev); props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN; - flags = 
dev->mdev.caps.flags; + flags = dev->mdev->caps.flags; if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR) @@ -292,30 +213,30 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, memcpy(&props->sys_image_guid, out_mad->data + 4, 8); props->max_mr_size = ~0ull; - props->page_size_cap = dev->mdev.caps.min_page_sz; - props->max_qp = 1 << dev->mdev.caps.log_max_qp; - props->max_qp_wr = dev->mdev.caps.max_wqes; - max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); - max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / + props->page_size_cap = dev->mdev->caps.min_page_sz; + props->max_qp = 1 << dev->mdev->caps.log_max_qp; + props->max_qp_wr = dev->mdev->caps.max_wqes; + max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); + max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / sizeof(struct mlx5_wqe_data_seg); props->max_sge = min(max_rq_sg, max_sq_sg); - props->max_cq = 1 << dev->mdev.caps.log_max_cq; - props->max_cqe = dev->mdev.caps.max_cqes - 1; - props->max_mr = 1 << dev->mdev.caps.log_max_mkey; - props->max_pd = 1 << dev->mdev.caps.log_max_pd; - props->max_qp_rd_atom = dev->mdev.caps.max_ra_req_qp; - props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp; + props->max_cq = 1 << dev->mdev->caps.log_max_cq; + props->max_cqe = dev->mdev->caps.max_cqes - 1; + props->max_mr = 1 << dev->mdev->caps.log_max_mkey; + props->max_pd = 1 << dev->mdev->caps.log_max_pd; + props->max_qp_rd_atom = dev->mdev->caps.max_ra_req_qp; + props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp; props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; - props->max_srq = 1 << dev->mdev.caps.log_max_srq; - props->max_srq_wr = dev->mdev.caps.max_srq_wqes - 1; + props->max_srq = 1 << dev->mdev->caps.log_max_srq; + props->max_srq_wr = dev->mdev->caps.max_srq_wqes - 1; props->max_srq_sge = max_rq_sg - 1; props->max_fast_reg_page_list_len = (unsigned int)-1; - props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay; + props->local_ca_ack_delay = dev->mdev->caps.local_ca_ack_delay; props->atomic_cap = IB_ATOMIC_NONE; props->masked_atomic_cap = IB_ATOMIC_NONE; props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); - props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg; - props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg; + props->max_mcast_grp = 1 << dev->mdev->caps.log_max_mcg; + props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ @@ -336,7 +257,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, int ext_active_speed; int err = -ENOMEM; - if (port < 1 || port > dev->mdev.caps.num_ports) { + if (port < 1 || port > dev->mdev->caps.num_ports) { mlx5_ib_warn(dev, "invalid port number %d\n", port); return -EINVAL; } @@ -367,8 +288,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, props->phys_state = out_mad->data[33] >> 4; props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); props->gid_tbl_len = out_mad->data[50]; - props->max_msg_sz = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg; - props->pkey_tbl_len = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len; + props->max_msg_sz = 1 << to_mdev(ibdev)->mdev->caps.log_max_msg; + props->pkey_tbl_len = to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len; 
props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; @@ -395,7 +316,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, /* If reported active speed is QDR, check if is FDR-10 */ if (props->active_speed == 4) { - if (dev->mdev.caps.ext_port_cap[port - 1] & + if (dev->mdev->caps.ext_port_cap[port - 1] & MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { init_query_mad(in_mad); in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; @@ -508,7 +429,7 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, * a 144 trap. If cmd fails, just ignore. */ memcpy(&in, props->node_desc, 64); - err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out, + err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out, sizeof(out), MLX5_REG_NODE_DESC, 0, 1); if (err) return err; @@ -535,7 +456,7 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, tmp = (attr.port_cap_flags | props->set_port_cap_mask) & ~props->clr_port_cap_mask; - err = mlx5_set_port_caps(&dev->mdev, port, tmp); + err = mlx5_set_port_caps(dev->mdev, port, tmp); out: mutex_unlock(&dev->cap_mask_mutex); @@ -591,14 +512,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; - resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp; - resp.bf_reg_size = dev->mdev.caps.bf_reg_size; + resp.qp_tab_size = 1 << dev->mdev->caps.log_max_qp; + resp.bf_reg_size = dev->mdev->caps.bf_reg_size; resp.cache_line_size = L1_CACHE_BYTES; - resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz; - resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz; - resp.max_send_wqebb = dev->mdev.caps.max_wqes; - resp.max_recv_wr = dev->mdev.caps.max_wqes; - resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes; + resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz; + resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz; + resp.max_send_wqebb = dev->mdev->caps.max_wqes; + resp.max_recv_wr = dev->mdev->caps.max_wqes; + resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes; context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) @@ -635,7 +556,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, } for (i = 0; i < num_uars; i++) { - err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index); + err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index); if (err) goto out_count; } @@ -644,7 +565,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, mutex_init(&context->db_page_mutex); resp.tot_uuars = req.total_num_uuars; - resp.num_ports = dev->mdev.caps.num_ports; + resp.num_ports = dev->mdev->caps.num_ports; err = ib_copy_to_udata(udata, &resp, sizeof(resp) - sizeof(resp.reserved)); if (err) @@ -658,7 +579,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, out_uars: for (i--; i >= 0; i--) - mlx5_cmd_free_uar(&dev->mdev, uars[i].index); + mlx5_cmd_free_uar(dev->mdev, uars[i].index); out_count: kfree(uuari->count); @@ -681,7 +602,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) int i; for (i = 0; i < uuari->num_uars; i++) { - if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index)) + if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index)) mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index); } @@ -695,7 +616,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext 
*ibcontext) static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index) { - return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index; + return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index; } static int get_command(unsigned long offset) @@ -773,7 +694,7 @@ static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); seg->start_addr = 0; - err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in), + err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), NULL, NULL, NULL); if (err) { mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); @@ -798,7 +719,7 @@ static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) memset(&mr, 0, sizeof(mr)); mr.key = key; - err = mlx5_core_destroy_mkey(&dev->mdev, &mr); + err = mlx5_core_destroy_mkey(dev->mdev, &mr); if (err) mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); } @@ -815,7 +736,7 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, if (!pd) return ERR_PTR(-ENOMEM); - err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn); + err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn); if (err) { kfree(pd); return ERR_PTR(err); @@ -824,14 +745,14 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, if (context) { resp.pdn = pd->pdn; if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { - mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn); + mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); kfree(pd); return ERR_PTR(-EFAULT); } } else { err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); if (err) { - mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn); + mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); kfree(pd); return ERR_PTR(err); } @@ -848,7 +769,7 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd) if (!pd->uobject) free_pa_mkey(mdev, mpd->pa_lkey); - mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn); + mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); kfree(mpd); return 0; @@ -859,7 +780,7 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) struct mlx5_ib_dev *dev = to_mdev(ibqp->device); int err; - err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num); + err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num); if (err) mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", ibqp->qp_num, gid->raw); @@ -872,7 +793,7 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) struct mlx5_ib_dev *dev = to_mdev(ibqp->device); int err; - err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num); + err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num); if (err) mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", ibqp->qp_num, gid->raw); @@ -906,7 +827,7 @@ static int init_node_data(struct mlx5_ib_dev *dev) if (err) goto out; - dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32)); + dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32)); memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); out: @@ -921,7 +842,7 @@ static ssize_t show_fw_pages(struct device *device, struct device_attribute *att struct mlx5_ib_dev *dev = container_of(device, struct mlx5_ib_dev, ib_dev.dev); - return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages); + return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages); } static ssize_t show_reg_pages(struct device *device, @@ -930,7 +851,7 @@ static ssize_t show_reg_pages(struct device *device, struct mlx5_ib_dev *dev = container_of(device, struct mlx5_ib_dev, ib_dev.dev); - return sprintf(buf, "%d\n", 
dev->mdev.priv.reg_pages); + return sprintf(buf, "%d\n", dev->mdev->priv.reg_pages); } static ssize_t show_hca(struct device *device, struct device_attribute *attr, @@ -938,7 +859,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr, { struct mlx5_ib_dev *dev = container_of(device, struct mlx5_ib_dev, ib_dev.dev); - return sprintf(buf, "MT%d\n", dev->mdev.pdev->device); + return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); } static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, @@ -946,8 +867,8 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, { struct mlx5_ib_dev *dev = container_of(device, struct mlx5_ib_dev, ib_dev.dev); - return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev), - fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev)); + return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev), + fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); } static ssize_t show_rev(struct device *device, struct device_attribute *attr, @@ -955,7 +876,7 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr, { struct mlx5_ib_dev *dev = container_of(device, struct mlx5_ib_dev, ib_dev.dev); - return sprintf(buf, "%x\n", dev->mdev.rev_id); + return sprintf(buf, "%x\n", dev->mdev->rev_id); } static ssize_t show_board(struct device *device, struct device_attribute *attr, @@ -964,7 +885,7 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr, struct mlx5_ib_dev *dev = container_of(device, struct mlx5_ib_dev, ib_dev.dev); return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, - dev->mdev.board_id); + dev->mdev->board_id); } static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); @@ -983,11 +904,12 @@ static struct device_attribute *mlx5_class_attributes[] = { &dev_attr_reg_pages, }; -static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, - void *data) +static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, + enum mlx5_dev_event event, void *data) { - struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev); + struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; struct ib_event ibev; + u8 port = 0; switch (event) { @@ -1047,7 +969,7 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev) { int port; - for (port = 1; port <= dev->mdev.caps.num_ports; port++) + for (port = 1; port <= dev->mdev->caps.num_ports; port++) mlx5_query_ext_port_caps(dev, port); } @@ -1072,14 +994,14 @@ static int get_port_caps(struct mlx5_ib_dev *dev) goto out; } - for (port = 1; port <= dev->mdev.caps.num_ports; port++) { + for (port = 1; port <= dev->mdev->caps.num_ports; port++) { err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); if (err) { mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err); break; } - dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys; - dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; + dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys; + dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", dprops->max_pkeys, pprops->gid_tbl_len); } @@ -1328,10 +1250,8 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr) mlx5_ib_dealloc_pd(devr->p0); } -static int init_one(struct pci_dev *pdev, - const struct pci_device_id *id) +static void *mlx5_ib_add(struct mlx5_core_dev *mdev) { - struct mlx5_core_dev *mdev; struct mlx5_ib_dev *dev; int err; int i; @@ -1340,28 +1260,19 @@ static int 
init_one(struct pci_dev *pdev, dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); if (!dev) - return -ENOMEM; + return NULL; - mdev = &dev->mdev; - mdev->event = mlx5_ib_event; - if (prof_sel >= ARRAY_SIZE(profile)) { - pr_warn("selected pofile out of range, selceting default\n"); - prof_sel = 0; - } - mdev->profile = &profile[prof_sel]; - err = mlx5_dev_init(mdev, pdev); - if (err) - goto err_free; + dev->mdev = mdev; err = get_port_caps(dev); if (err) - goto err_cleanup; + goto err_dealloc; get_ext_port_caps(dev); err = alloc_comp_eqs(dev); if (err) - goto err_cleanup; + goto err_dealloc; MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); @@ -1480,7 +1391,7 @@ static int init_one(struct pci_dev *pdev, dev->ib_active = true; - return 0; + return dev; err_umrc: destroy_umrc_res(dev); @@ -1494,49 +1405,39 @@ err_rsrc: err_eqs: free_comp_eqs(dev); -err_cleanup: - mlx5_dev_cleanup(mdev); - -err_free: +err_dealloc: ib_dealloc_device((struct ib_device *)dev); - return err; + return NULL; } -static void remove_one(struct pci_dev *pdev) +static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) { - struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev); - + struct mlx5_ib_dev *dev = context; destroy_umrc_res(dev); ib_unregister_device(&dev->ib_dev); destroy_dev_resources(&dev->devr); free_comp_eqs(dev); - mlx5_dev_cleanup(&dev->mdev); ib_dealloc_device(&dev->ib_dev); } -static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = { - { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ - { 0, } -}; - -MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table); - -static struct pci_driver mlx5_ib_driver = { - .name = DRIVER_NAME, - .id_table = mlx5_ib_pci_table, - .probe = init_one, - .remove = remove_one +static struct mlx5_interface mlx5_ib_interface = { + .add = mlx5_ib_add, + .remove = mlx5_ib_remove, + .event = mlx5_ib_event, }; static int __init mlx5_ib_init(void) { - return pci_register_driver(&mlx5_ib_driver); + if (deprecated_prof_sel != 2) + pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n"); + + return mlx5_register_interface(&mlx5_ib_interface); } static void __exit mlx5_ib_cleanup(void) { - pci_unregister_driver(&mlx5_ib_driver); + mlx5_unregister_interface(&mlx5_ib_interface); } module_init(mlx5_ib_init); -- cgit v1.2.3-70-g09d2 From f241e7497ec2d22b83002b17ae91a851d4034cb7 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 28 Jul 2014 23:30:23 +0300 Subject: mlx5: minor fixes (mainly avoidance of hidden casts) There were many places where parameters which should be u8/u16 were integer type. Additionally, in 2 places, a check for a non-null pointer was added before dereferencing the pointer (this is actually a bug fix). Signed-off-by: Jack Morgenstein Signed-off-by: Eli Cohen Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx5/cq.c | 2 +- drivers/infiniband/hw/mlx5/mad.c | 2 +- drivers/infiniband/hw/mlx5/main.c | 2 +- drivers/infiniband/hw/mlx5/mem.c | 2 +- drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 +- drivers/infiniband/hw/mlx5/qp.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/mad.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/port.c | 2 +- include/linux/mlx5/device.h | 4 ---- include/linux/mlx5/driver.h | 8 ++++---- 15 files changed, 19 insertions(+), 22 deletions(-) (limited to 'drivers/infiniband/hw/mlx5/main.c') diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 3b4dc858cef..e4056279166 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -348,7 +348,7 @@ static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, u16 tail, u16 head) { - int idx; + u16 idx; do { idx = tail & (qp->sq.wqe_cnt - 1); diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index e259e739315..b514bbb5610 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -41,7 +41,7 @@ enum { }; int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh, void *in_mad, void *response_mad) { u8 op_modifier = 0; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index f2cfd363a70..166335a95c5 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -478,7 +478,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, int uuarn; int err; int i; - int reqlen; + size_t reqlen; if (!dev->ib_active) return ERR_PTR(-EAGAIN); diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 8499aec94db..a3e81444c82 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -148,7 +148,7 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) u64 off_mask; u64 buf_off; - page_size = 1 << page_shift; + page_size = (u64)1 << page_shift; page_mask = page_size - 1; buf_off = addr & page_mask; off_size = page_size >> 6; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index a0e204ffe36..386780f0d1e 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -461,7 +461,7 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh, void *in_mad, void *response_mad); struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr, struct mlx5_ib_ah *ah); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index b8bb6ad6350..7efe6e3f354 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ 
-2539,7 +2539,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(seg, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); - seg += sizeof(struct mlx5_wqe_raddr_seg); + seg += sizeof(struct mlx5_wqe_raddr_seg); size += sizeof(struct mlx5_wqe_raddr_seg) / 16; break; @@ -2668,7 +2668,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, case IB_QPT_SMI: case IB_QPT_GSI: set_datagram_seg(seg, wr); - seg += sizeof(struct mlx5_wqe_datagram_seg); + seg += sizeof(struct mlx5_wqe_datagram_seg); size += sizeof(struct mlx5_wqe_datagram_seg) / 16; if (unlikely((seg == qend))) seg = mlx5_get_send_wqe(qp, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index b215742b842..56779c1c781 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -56,7 +56,7 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, if (size <= max_direct) { buf->nbufs = 1; buf->npages = 1; - buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, size, &t, GFP_KERNEL); if (!buf->direct.buf) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 87d1b018a9c..4671747dd36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -464,7 +464,7 @@ static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; struct mlx5_cmd_mailbox *next = msg->next; int data_only; - int offset = 0; + u32 offset = 0; int dump_len; data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 7f39ebcd6ad..67cead2c079 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -252,7 +252,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) case MLX5_PORT_CHANGE_SUBTYPE_GUID: case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: - dev->event(dev, port_subtype_event(eqe->sub_type), &port); + if (dev->event) + dev->event(dev, port_subtype_event(eqe->sub_type), &port); break; default: mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c index 18d6fd5dd90..fd80ecfa719 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c @@ -37,7 +37,7 @@ #include "mlx5_core.h" int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, - u16 opmod, int port) + u16 opmod, u8 port) { struct mlx5_mad_ifc_mbox_in *in = NULL; struct mlx5_mad_ifc_mbox_out *out = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 4b7f9da4bf1..fd782bf49dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -311,7 +311,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap); - if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) + if (dev->profile && dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) set_ctx->hca_cap.log_max_qp = 
dev->profile->log_max_qp; flags = be64_to_cpu(query_out->hca_cap.flags); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index c2a953ef0e6..d476918ef26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -51,7 +51,7 @@ enum { struct mlx5_pages_req { struct mlx5_core_dev *dev; - u32 func_id; + u16 func_id; s32 npages; struct work_struct work; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 8c9ac870ecb..313965853e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -86,7 +86,7 @@ struct mlx5_reg_pcap { __be32 caps_31_0; }; -int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps) +int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps) { struct mlx5_reg_pcap in; struct mlx5_reg_pcap out; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 3406cfb1267..334947151df 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -456,9 +456,6 @@ struct mlx5_eqe_cq_err { u8 syndrome; }; -struct mlx5_eqe_dropped_packet { -}; - struct mlx5_eqe_port_state { u8 reserved0[8]; u8 port; @@ -498,7 +495,6 @@ union ev_data { struct mlx5_eqe_comp comp; struct mlx5_eqe_qp_srq qp_srq; struct mlx5_eqe_cq_err cq_err; - struct mlx5_eqe_dropped_packet dp; struct mlx5_eqe_port_state port; struct mlx5_eqe_gpio gpio; struct mlx5_eqe_congestion cong; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index d0cb5984a45..76de0cc4164 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -381,8 +381,8 @@ struct mlx5_buf { struct mlx5_buf_list *page_list; int nbufs; int npages; - int page_shift; int size; + u8 page_shift; }; struct mlx5_eq { @@ -736,7 +736,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, - u16 opmod, int port); + u16 opmod, u8 port); void mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev); @@ -769,7 +769,7 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps); +int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); @@ -826,7 +826,7 @@ void mlx5_unregister_interface(struct mlx5_interface *intf); struct mlx5_profile { u64 mask; - u32 log_max_qp; + u8 log_max_qp; struct { int size; int limit; -- cgit v1.2.3-70-g09d2 From 4d2f9bbb654b91a262638ac2c84dcb169d014aa6 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 28 Jul 2014 23:30:24 +0300 Subject: mlx5: Adjust events to use unsigned long param instead of void * In the event flow, we currently pass only a port number in the void *data argument. 
Rather than pass a pointer to the event handlers, we should use an "unsigned long" parameter, and pass the port number value directly. In the future, if necessary for some events, we can use the unsigned long parameter to pass a pointer. Based on a patch by Eli Cohen Signed-off-by: Jack Morgenstein Signed-off-by: Eli Cohen Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/main.c | 14 +++++++------- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/main.c | 4 ++-- include/linux/mlx5/driver.h | 4 ++-- 4 files changed, 13 insertions(+), 12 deletions(-) (limited to 'drivers/infiniband/hw/mlx5/main.c') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 166335a95c5..d8907b20522 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -905,7 +905,7 @@ static struct device_attribute *mlx5_class_attributes[] = { }; static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, - enum mlx5_dev_event event, void *data) + enum mlx5_dev_event event, unsigned long param) { struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; struct ib_event ibev; @@ -920,12 +920,12 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, case MLX5_DEV_EVENT_PORT_UP: ibev.event = IB_EVENT_PORT_ACTIVE; - port = *(u8 *)data; + port = (u8)param; break; case MLX5_DEV_EVENT_PORT_DOWN: ibev.event = IB_EVENT_PORT_ERR; - port = *(u8 *)data; + port = (u8)param; break; case MLX5_DEV_EVENT_PORT_INITIALIZED: @@ -934,22 +934,22 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, case MLX5_DEV_EVENT_LID_CHANGE: ibev.event = IB_EVENT_LID_CHANGE; - port = *(u8 *)data; + port = (u8)param; break; case MLX5_DEV_EVENT_PKEY_CHANGE: ibev.event = IB_EVENT_PKEY_CHANGE; - port = *(u8 *)data; + port = (u8)param; break; case MLX5_DEV_EVENT_GUID_CHANGE: ibev.event = IB_EVENT_GID_CHANGE; - port = *(u8 *)data; + port = (u8)param; break; case MLX5_DEV_EVENT_CLIENT_REREG: ibev.event = IB_EVENT_CLIENT_REREGISTER; - port = *(u8 *)data; + port = (u8)param; break; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 67cead2c079..4e8bd0b34bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -253,7 +253,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: if (dev->event) - dev->event(dev, port_subtype_event(eqe->sub_type), &port); + dev->event(dev, port_subtype_event(eqe->sub_type), + (unsigned long)port); break; default: mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index fd782bf49dc..f2716cc1f51 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -704,7 +704,7 @@ void mlx5_unregister_interface(struct mlx5_interface *intf) EXPORT_SYMBOL(mlx5_unregister_interface); static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, - void *data) + unsigned long param) { struct mlx5_priv *priv = &dev->priv; struct mlx5_device_context *dev_ctx; @@ -714,7 +714,7 @@ static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event list_for_each_entry(dev_ctx, &priv->ctx_list, list) if (dev_ctx->intf->event) - 
dev_ctx->intf->event(dev, dev_ctx->context, event, data); + dev_ctx->intf->event(dev, dev_ctx->context, event, param); spin_unlock_irqrestore(&priv->ctx_lock, flags); } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 76de0cc4164..9f3a5476bb7 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -559,7 +559,7 @@ struct mlx5_core_dev { struct mlx5_init_seg __iomem *iseg; void (*event) (struct mlx5_core_dev *dev, enum mlx5_dev_event event, - void *data); + unsigned long param); struct mlx5_priv priv; struct mlx5_profile *profile; atomic_t num_qps; @@ -817,7 +817,7 @@ struct mlx5_interface { void * (*add)(struct mlx5_core_dev *dev); void (*remove)(struct mlx5_core_dev *dev, void *context); void (*event)(struct mlx5_core_dev *dev, void *context, - enum mlx5_dev_event event, void *data); + enum mlx5_dev_event event, unsigned long param); struct list_head list; }; -- cgit v1.2.3-70-g09d2
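
Editor's note: the series above converts mlx5_ib from a PCI driver into an mlx5_core "interface" client. As a reading aid, here is a minimal sketch of what a client looks like after all three patches are applied. The struct mlx5_interface fields (.add/.remove/.event), the mlx5_register_interface()/mlx5_unregister_interface() calls, and the event callback taking an unsigned long param are taken directly from the patches; everything named "example_*", the struct example_dev holder, and the module boilerplate are hypothetical illustration, not code from the series.

	/* Sketch of an mlx5_core interface client, assuming the API as it
	 * stands at the end of this series.  mlx5_core owns the PCI device;
	 * sub-drivers attach through struct mlx5_interface and get the
	 * mlx5_core_dev pointer in add(). */
	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/mlx5/driver.h>

	struct example_dev {
		struct mlx5_core_dev *mdev;	/* core device we attached to */
	};

	static void *example_add(struct mlx5_core_dev *mdev)
	{
		/* Per-device state; mlx5_core stores the returned pointer and
		 * passes it back as "context" to remove() and event(). */
		struct example_dev *edev = kzalloc(sizeof(*edev), GFP_KERNEL);

		if (!edev)
			return NULL;
		edev->mdev = mdev;
		return edev;
	}

	static void example_remove(struct mlx5_core_dev *mdev, void *context)
	{
		kfree(context);
	}

	static void example_event(struct mlx5_core_dev *mdev, void *context,
				  enum mlx5_dev_event event, unsigned long param)
	{
		/* After the third patch, port events deliver the port number
		 * directly in "param" rather than through a pointer. */
	}

	static struct mlx5_interface example_interface = {
		.add    = example_add,
		.remove = example_remove,
		.event  = example_event,
	};

	static int __init example_init(void)
	{
		return mlx5_register_interface(&example_interface);
	}

	static void __exit example_cleanup(void)
	{
		mlx5_unregister_interface(&example_interface);
	}

	module_init(example_init);
	module_exit(example_cleanup);
	MODULE_LICENSE("Dual BSD/GPL");

This mirrors how mlx5_ib_add()/mlx5_ib_remove()/mlx5_ib_event() are wired up in the first patch, minus the IB-specific initialization.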