Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/main.c')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/main.c | 316
1 file changed, 151 insertions, 165 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index cef267e24f9..5f42f6d6e4c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -104,8 +104,6 @@ module_param(enable_64b_cqe_eqe, bool, 0444); MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); -#define HCA_GLOBAL_CAP_MASK 0 - #define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE static char mlx4_version[] = @@ -134,8 +132,7 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); static bool use_prio; module_param_named(use_prio, use_prio, bool, 0444); -MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " - "(0/1, default 0)"); +MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); @@ -163,8 +160,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev, for (i = 0; i < dev->caps.num_ports - 1; i++) { if (port_type[i] != port_type[i + 1]) { if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { - mlx4_err(dev, "Only same port types supported " - "on this HCA, aborting.\n"); + mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); return -EINVAL; } } @@ -172,8 +168,8 @@ int mlx4_check_port_params(struct mlx4_dev *dev, for (i = 0; i < dev->caps.num_ports; i++) { if (!(port_type[i] & dev->caps.supported_type[i+1])) { - mlx4_err(dev, "Requested port type for port %d is not " - "supported on this HCA\n", i + 1); + mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", + i + 1); return -EINVAL; } } @@ -195,26 +191,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) err = mlx4_QUERY_DEV_CAP(dev, dev_cap); if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); return err; } if (dev_cap->min_page_sz > PAGE_SIZE) { - mlx4_err(dev, "HCA minimum page size of %d bigger than " - "kernel PAGE_SIZE of %ld, aborting.\n", + mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", dev_cap->min_page_sz, PAGE_SIZE); return -ENODEV; } if (dev_cap->num_ports > MLX4_MAX_PORTS) { - mlx4_err(dev, "HCA has %d ports, but we only support %d, " - "aborting.\n", + mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", dev_cap->num_ports, MLX4_MAX_PORTS); return -ENODEV; } if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { - mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " - "PCI resource 2 size of 0x%llx, aborting.\n", + mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", dev_cap->uar_size, (unsigned long long) pci_resource_len(dev->pdev, 2)); return -ENODEV; @@ -296,7 +289,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.log_num_macs = log_num_mac; dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; - dev->caps.log_num_prios = use_prio ? 
3 : 0; for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; @@ -347,14 +339,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { dev->caps.log_num_macs = dev_cap->log_max_macs[i]; - mlx4_warn(dev, "Requested number of MACs is too much " - "for port %d, reducing to %d.\n", + mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", i, 1 << dev->caps.log_num_macs); } if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; - mlx4_warn(dev, "Requested number of VLANs is too much " - "for port %d, reducing to %d.\n", + mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", i, 1 << dev->caps.log_num_vlans); } } @@ -366,7 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = (1 << dev->caps.log_num_macs) * (1 << dev->caps.log_num_vlans) * - (1 << dev->caps.log_num_prios) * dev->caps.num_ports; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; @@ -584,13 +573,14 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) memset(&hca_param, 0, sizeof(hca_param)); err = mlx4_QUERY_HCA(dev, &hca_param); if (err) { - mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); + mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); return err; } - /*fail if the hca has an unknown capability */ - if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != - HCA_GLOBAL_CAP_MASK) { + /* fail if the hca has an unknown global capability + * at this time global_caps should be always zeroed + */ + if (hca_param.global_caps) { mlx4_err(dev, "Unknown hca global capabilities\n"); return -ENOSYS; } @@ -603,19 +593,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; err = mlx4_dev_cap(dev, &dev_cap); if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); return err; } err = mlx4_QUERY_FW(dev); if (err) - mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); + mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); page_size = ~dev->caps.page_size_cap + 1; mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); if (page_size > PAGE_SIZE) { - mlx4_err(dev, "HCA minimum page size of %d bigger than " - "kernel PAGE_SIZE of %ld, aborting.\n", + mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", page_size, PAGE_SIZE); return -ENODEV; } @@ -633,8 +622,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) memset(&func_cap, 0, sizeof(func_cap)); err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); if (err) { - mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n", - err); + mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", + err); return err; } @@ -661,18 +650,20 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.num_amgms = 0; if (dev->caps.num_ports > MLX4_MAX_PORTS) { - mlx4_err(dev, "HCA has %d ports, but we only support %d, " - "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); + mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", + dev->caps.num_ports, MLX4_MAX_PORTS); return -ENODEV; } + dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), 
GFP_KERNEL); dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || - !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) { + !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || + !dev->caps.qp0_qkey) { err = -ENOMEM; goto err_mem; } @@ -680,10 +671,11 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) for (i = 1; i <= dev->caps.num_ports; ++i) { err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); if (err) { - mlx4_err(dev, "QUERY_FUNC_CAP port command failed for" - " port %d, aborting (%d).\n", i, err); + mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", + i, err); goto err_mem; } + dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; @@ -699,8 +691,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) if (dev->caps.uar_page_size * (dev->caps.num_uars - dev->caps.reserved_uars) > pci_resource_len(dev->pdev, 2)) { - mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " - "PCI resource 2 size of 0x%llx, aborting.\n", + mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", dev->caps.uar_page_size * dev->caps.num_uars, (unsigned long long) pci_resource_len(dev->pdev, 2)); goto err_mem; @@ -722,19 +713,23 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) } dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; - mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); + mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); slave_adjust_steering_mode(dev, &dev_cap, &hca_param); return 0; err_mem: + kfree(dev->caps.qp0_qkey); kfree(dev->caps.qp0_tunnel); kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); kfree(dev->caps.qp1_proxy); - dev->caps.qp0_tunnel = dev->caps.qp0_proxy = - dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL; + dev->caps.qp0_qkey = NULL; + dev->caps.qp0_tunnel = NULL; + dev->caps.qp0_proxy = NULL; + dev->caps.qp1_tunnel = NULL; + dev->caps.qp1_proxy = NULL; return err; } @@ -754,10 +749,10 @@ static void mlx4_request_modules(struct mlx4_dev *dev) has_eth_port = true; } - if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) - request_module_nowait(IB_DRV_NAME); if (has_eth_port) request_module_nowait(EN_DRV_NAME); + if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) + request_module_nowait(IB_DRV_NAME); } /* @@ -784,8 +779,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev, dev->caps.port_type[port] = port_types[port - 1]; err = mlx4_SET_PORT(dev, port, -1); if (err) { - mlx4_err(dev, "Failed to set port %d, " - "aborting\n", port); + mlx4_err(dev, "Failed to set port %d, aborting\n", + port); goto out; } } @@ -868,9 +863,7 @@ static ssize_t set_port_type(struct device *dev, } } if (err) { - mlx4_err(mdev, "Auto sensing is not supported on this HCA. " - "Set only 'eth' or 'ib' for both ports " - "(should be the same)\n"); + mlx4_err(mdev, "Auto sensing is not supported on this HCA. 
Set only 'eth' or 'ib' for both ports (should be the same)\n"); goto out; } @@ -975,8 +968,8 @@ static ssize_t set_port_ib_mtu(struct device *dev, mlx4_CLOSE_PORT(mdev, port); err = mlx4_SET_PORT(mdev, port, -1); if (err) { - mlx4_err(mdev, "Failed to set port %d, " - "aborting\n", port); + mlx4_err(mdev, "Failed to set port %d, aborting\n", + port); goto err_set_port; } } @@ -995,19 +988,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev) priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, GFP_HIGHUSER | __GFP_NOWARN, 0); if (!priv->fw.fw_icm) { - mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); + mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); return -ENOMEM; } err = mlx4_MAP_FA(dev, priv->fw.fw_icm); if (err) { - mlx4_err(dev, "MAP_FA command failed, aborting.\n"); + mlx4_err(dev, "MAP_FA command failed, aborting\n"); goto err_free; } err = mlx4_RUN_FW(dev); if (err) { - mlx4_err(dev, "RUN_FW command failed, aborting.\n"); + mlx4_err(dev, "RUN_FW command failed, aborting\n"); goto err_unmap_fa; } @@ -1091,30 +1084,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); if (err) { - mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); + mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); return err; } - mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", + mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", (unsigned long long) icm_size >> 10, (unsigned long long) aux_pages << 2); priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, GFP_HIGHUSER | __GFP_NOWARN, 0); if (!priv->fw.aux_icm) { - mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); + mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); return -ENOMEM; } err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); if (err) { - mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); + mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); goto err_free_aux; } err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); if (err) { - mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); + mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); goto err_unmap_aux; } @@ -1125,7 +1118,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, init_hca->eqc_base, dev_cap->eqc_entry_sz, num_eqs, num_eqs, 0, 0); if (err) { - mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); + mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); goto err_unmap_cmpt; } @@ -1146,7 +1139,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, dev->caps.num_mtts, dev->caps.reserved_mtts, 1, 0); if (err) { - mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); + mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); goto err_unmap_eq; } @@ -1156,7 +1149,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, dev->caps.num_mpts, dev->caps.reserved_mrws, 1, 1); if (err) { - mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); + mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); goto err_unmap_mtt; } @@ -1167,7 +1160,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { - mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); + mlx4_err(dev, "Failed to map QP context memory, aborting\n"); goto err_unmap_dmpt; } @@ -1178,7 +1171,7 @@ static 
int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { - mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); + mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); goto err_unmap_qp; } @@ -1189,7 +1182,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { - mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); + mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); goto err_unmap_auxc; } @@ -1210,7 +1203,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, dev->caps.num_cqs, dev->caps.reserved_cqs, 0, 0); if (err) { - mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); + mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); goto err_unmap_rdmarc; } @@ -1220,7 +1213,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, dev->caps.num_srqs, dev->caps.reserved_srqs, 0, 0); if (err) { - mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); + mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); goto err_unmap_cq; } @@ -1238,7 +1231,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, dev->caps.num_mgms + dev->caps.num_amgms, 0, 0); if (err) { - mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); + mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); goto err_unmap_srq; } @@ -1315,7 +1308,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev) mutex_lock(&priv->cmd.slave_cmd_mutex); if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) - mlx4_warn(dev, "Failed to close slave function.\n"); + mlx4_warn(dev, "Failed to close slave function\n"); mutex_unlock(&priv->cmd.slave_cmd_mutex); } @@ -1413,7 +1406,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev) u32 cmd_channel_ver; if (atomic_read(&pf_loading)) { - mlx4_warn(dev, "PF is not ready. Deferring probe\n"); + mlx4_warn(dev, "PF is not ready - Deferring probe\n"); return -EPROBE_DEFER; } @@ -1426,8 +1419,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev) * NUM_OF_RESET_RETRIES times before leaving.*/ if (ret_from_reset) { if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { - mlx4_warn(dev, "slave is currently in the " - "middle of FLR. Deferring probe.\n"); + mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); mutex_unlock(&priv->cmd.slave_cmd_mutex); return -EPROBE_DEFER; } else @@ -1441,8 +1433,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev) if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != MLX4_COMM_GET_IF_REV(slave_read)) { - mlx4_err(dev, "slave driver version is not supported" - " by the master\n"); + mlx4_err(dev, "slave driver version is not supported by the master\n"); goto err; } @@ -1520,8 +1511,7 @@ static void choose_steering_mode(struct mlx4_dev *dev, if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) - mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags " - "set to use B0 steering. Falling back to A0 steering mode.\n"); + mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); } dev->oper_log_mgm_entry_size = mlx4_log_num_mgm_entry_size > 0 ? 
@@ -1529,8 +1519,7 @@ static void choose_steering_mode(struct mlx4_dev *dev, MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); } - mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, " - "modparam log_num_mgm_entry_size = %d\n", + mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", mlx4_steering_mode_str(dev->caps.steering_mode), dev->oper_log_mgm_entry_size, mlx4_log_num_mgm_entry_size); @@ -1564,15 +1553,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_QUERY_FW(dev); if (err) { if (err == -EACCES) - mlx4_info(dev, "non-primary physical function, skipping.\n"); + mlx4_info(dev, "non-primary physical function, skipping\n"); else - mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); + mlx4_err(dev, "QUERY_FW command failed, aborting\n"); return err; } err = mlx4_load_fw(dev); if (err) { - mlx4_err(dev, "Failed to start FW, aborting.\n"); + mlx4_err(dev, "Failed to start FW, aborting\n"); return err; } @@ -1584,7 +1573,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_dev_cap(dev, &dev_cap); if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); goto err_stop_fw; } @@ -1625,7 +1614,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_INIT_HCA(dev, &init_hca); if (err) { - mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); + mlx4_err(dev, "INIT_HCA command failed, aborting\n"); goto err_free_icm; } /* @@ -1636,7 +1625,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) memset(&init_hca, 0, sizeof(init_hca)); err = mlx4_QUERY_HCA(dev, &init_hca); if (err) { - mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n"); + mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; } else { dev->caps.hca_core_clock = @@ -1649,14 +1638,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev) if (!dev->caps.hca_core_clock) { dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_err(dev, - "HCA frequency is 0. Timestamping is not supported."); + "HCA frequency is 0 - timestamping is not supported\n"); } else if (map_internal_clock(dev)) { /* * Map internal clock, * in case of failure disable timestamping */ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; - mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n"); + mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); } } } else { @@ -1683,7 +1672,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_QUERY_ADAPTER(dev, &adapter); if (err) { - mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); + mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); goto unmap_bf; } @@ -1696,6 +1685,14 @@ unmap_bf: unmap_internal_clock(dev); unmap_bf_area(dev); + if (mlx4_is_slave(dev)) { + kfree(dev->caps.qp0_qkey); + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + } + err_close: if (mlx4_is_slave(dev)) mlx4_slave_exit(dev); @@ -1793,79 +1790,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) err = mlx4_init_uar_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "user access region table, aborting.\n"); - return err; + mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); + return err; } err = mlx4_uar_alloc(dev, &priv->driver_uar); if (err) { - mlx4_err(dev, "Failed to allocate driver access region, " - "aborting.\n"); + mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); goto err_uar_table_free; } priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); if (!priv->kar) { - mlx4_err(dev, "Couldn't map kernel access region, " - "aborting.\n"); + mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); err = -ENOMEM; goto err_uar_free; } err = mlx4_init_pd_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "protection domain table, aborting.\n"); + mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); goto err_kar_unmap; } err = mlx4_init_xrcd_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "reliable connection domain table, aborting.\n"); + mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); goto err_pd_table_free; } err = mlx4_init_mr_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "memory region table, aborting.\n"); + mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); goto err_xrcd_table_free; } if (!mlx4_is_slave(dev)) { err = mlx4_init_mcg_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n"); + mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); goto err_mr_table_free; } } err = mlx4_init_eq_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "event queue table, aborting.\n"); + mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); goto err_mcg_table_free; } err = mlx4_cmd_use_events(dev); if (err) { - mlx4_err(dev, "Failed to switch to event-driven " - "firmware commands, aborting.\n"); + mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); goto err_eq_table_free; } err = mlx4_NOP(dev); if (err) { if (dev->flags & MLX4_FLAG_MSI_X) { - mlx4_warn(dev, "NOP command failed to generate MSI-X " - "interrupt IRQ %d).\n", + mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", priv->eq_table.eq[dev->caps.num_comp_vectors].irq); - mlx4_warn(dev, "Trying again without MSI-X.\n"); + mlx4_warn(dev, "Trying again without MSI-X\n"); } else { - mlx4_err(dev, "NOP command failed to generate interrupt " - "(IRQ %d), aborting.\n", + mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", priv->eq_table.eq[dev->caps.num_comp_vectors].irq); mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); } @@ -1877,28 +1864,25 @@ static int 
mlx4_setup_hca(struct mlx4_dev *dev) err = mlx4_init_cq_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "completion queue table, aborting.\n"); + mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); goto err_cmd_poll; } err = mlx4_init_srq_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "shared receive queue table, aborting.\n"); + mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); goto err_cq_table_free; } err = mlx4_init_qp_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "queue pair table, aborting.\n"); + mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); goto err_srq_table_free; } err = mlx4_init_counters_table(dev); if (err && err != -ENOENT) { - mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); + mlx4_err(dev, "Failed to initialize counters table, aborting\n"); goto err_qp_table_free; } @@ -1908,9 +1892,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); if (err) - mlx4_warn(dev, "failed to get port %d default " - "ib capabilities (%d). Continuing " - "with caps = 0\n", port, err); + mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", + port, err); dev->caps.ib_port_def_cap[port] = ib_port_default_caps; /* initialize per-slave default ib port capabilities */ @@ -1920,7 +1903,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) if (i == mlx4_master_func_num(dev)) continue; priv->mfunc.master.slave_state[i].ib_cap_mask[port] = - ib_port_default_caps; + ib_port_default_caps; } } @@ -1933,7 +1916,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) dev->caps.pkey_table_len[port] : -1); if (err) { mlx4_err(dev, "Failed to set port %d, aborting\n", - port); + port); goto err_counters_table_free; } } @@ -2009,7 +1992,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) kfree(entries); goto no_msi; } else if (nreq < MSIX_LEGACY_SZ + - dev->caps.num_ports * MIN_MSIX_P_PORT) { + dev->caps.num_ports * MIN_MSIX_P_PORT) { /*Working in legacy mode , all EQ's shared*/ dev->caps.comp_pool = 0; dev->caps.num_comp_vectors = nreq - 1; @@ -2044,6 +2027,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) if (!mlx4_is_slave(dev)) { mlx4_init_mac_table(dev, &info->mac_table); mlx4_init_vlan_table(dev, &info->vlan_table); + mlx4_init_roce_gid_table(dev, &info->gid_table); info->base_qpn = mlx4_get_base_qpn(dev, port); } @@ -2209,8 +2193,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) err = pci_enable_device(pdev); if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device, " - "aborting.\n"); + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; } @@ -2257,14 +2240,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) */ if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing DCS, aborting." 
- "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", + dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", pci_dev_data, pci_resource_flags(pdev, 0)); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing UAR, aborting.\n"); + dev_err(&pdev->dev, "Missing UAR, aborting\n"); err = -ENODEV; goto err_disable_pdev; } @@ -2279,21 +2261,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); goto err_release_regions; } } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " - "consistent PCI DMA mask.\n"); + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " - "aborting.\n"); + dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); goto err_release_regions; } } @@ -2324,7 +2304,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) if (total_vfs) { unsigned vfs_offset = 0; for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && - vfs_offset + nvfs[i] < extended_func_num(pdev); + vfs_offset + nvfs[i] < extended_func_num(pdev); vfs_offset += nvfs[i], i++) ; if (i == sizeof(nvfs)/sizeof(nvfs[0])) { @@ -2350,8 +2330,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) if (err < 0) goto err_free_dev; else { - mlx4_warn(dev, "Multiple PFs not yet supported." 
- " Skipping PF.\n"); + mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); err = -EINVAL; goto err_free_dev; } @@ -2361,8 +2340,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); dev->dev_vfs = kzalloc( - total_vfs * sizeof(*dev->dev_vfs), - GFP_KERNEL); + total_vfs * sizeof(*dev->dev_vfs), + GFP_KERNEL); if (NULL == dev->dev_vfs) { mlx4_err(dev, "Failed to allocate memory for VFs\n"); err = 0; @@ -2370,14 +2349,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) atomic_inc(&pf_loading); err = pci_enable_sriov(pdev, total_vfs); if (err) { - mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", + mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", err); atomic_dec(&pf_loading); err = 0; } else { mlx4_warn(dev, "Running in master mode\n"); dev->flags |= MLX4_FLAG_SRIOV | - MLX4_FLAG_MASTER; + MLX4_FLAG_MASTER; dev->num_vfs = total_vfs; sriov_initialized = 1; } @@ -2394,7 +2373,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) */ err = mlx4_reset(dev); if (err) { - mlx4_err(dev, "Failed to reset HCA, aborting.\n"); + mlx4_err(dev, "Failed to reset HCA, aborting\n"); goto err_rel_own; } } @@ -2402,7 +2381,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) slave_start: err = mlx4_cmd_init(dev); if (err) { - mlx4_err(dev, "Failed to init command interface, aborting.\n"); + mlx4_err(dev, "Failed to init command interface, aborting\n"); goto err_sriov; } @@ -2416,8 +2395,7 @@ slave_start: dev->num_slaves = 0; err = mlx4_multi_func_init(dev); if (err) { - mlx4_err(dev, "Failed to init slave mfunc" - " interface, aborting.\n"); + mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); goto err_cmd; } } @@ -2440,7 +2418,8 @@ slave_start: * No return code for this call, just warn the user in case of PCI * express device capabilities are under-satisfied by the bus. */ - mlx4_check_pcie_caps(dev); + if (!mlx4_is_slave(dev)) + mlx4_check_pcie_caps(dev); /* In master functions, the communication channel must be initialized * after obtaining its address from fw */ @@ -2448,8 +2427,7 @@ slave_start: unsigned sum = 0; err = mlx4_multi_func_init(dev); if (err) { - mlx4_err(dev, "Failed to init master mfunc" - "interface, aborting.\n"); + mlx4_err(dev, "Failed to init master mfunc interface, aborting\n"); goto err_close; } if (sriov_initialized) { @@ -2460,10 +2438,7 @@ slave_start: if (ib_ports && (num_vfs_argc > 1 || probe_vfs_argc > 1)) { mlx4_err(dev, - "Invalid syntax of num_vfs/probe_vfs " - "with IB port. Single port VFs syntax" - " is only supported when all ports " - "are configured as ethernet\n"); + "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); goto err_close; } for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { @@ -2489,8 +2464,7 @@ slave_start: if ((mlx4_is_mfunc(dev)) && !(dev->flags & MLX4_FLAG_MSI_X)) { err = -ENOSYS; - mlx4_err(dev, "INTx is not supported in multi-function mode." 
- " aborting.\n"); + mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); goto err_free_eq; } @@ -2564,6 +2538,14 @@ err_master_mfunc: if (mlx4_is_master(dev)) mlx4_multi_func_cleanup(dev); + if (mlx4_is_slave(dev)) { + kfree(dev->caps.qp0_qkey); + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + } + err_close: if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); @@ -2635,7 +2617,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev) /* in SRIOV it is not allowed to unload the pf's * driver while there are alive vf's */ if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev)) - printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); + pr_warn("Removing PF when there are assigned VF's !!!\n"); mlx4_stop_sense(dev); mlx4_unregister_device(dev); @@ -2687,6 +2669,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev) if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); + kfree(dev->caps.qp0_qkey); kfree(dev->caps.qp0_tunnel); kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); @@ -2798,7 +2781,7 @@ static struct pci_driver mlx4_driver = { .name = DRV_NAME, .id_table = mlx4_pci_table, .probe = mlx4_init_one, - .shutdown = mlx4_remove_one, + .shutdown = __mlx4_remove_one, .remove = mlx4_remove_one, .err_handler = &mlx4_err_handler, }; @@ -2806,33 +2789,36 @@ static struct pci_driver mlx4_driver = { static int __init mlx4_verify_params(void) { if ((log_num_mac < 0) || (log_num_mac > 7)) { - pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); + pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); return -1; } if (log_num_vlan != 0) - pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", - MLX4_LOG_NUM_VLANS); + pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", + MLX4_LOG_NUM_VLANS); + + if (use_prio != 0) + pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { - pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); + pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", + log_mtts_per_seg); return -1; } /* Check if module param for ports type has legal combination */ if (port_type_array[0] == false && port_type_array[1] == true) { - printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); + pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); port_type_array[0] = true; } if (mlx4_log_num_mgm_entry_size != -1 && (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { - pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not " - "in legal range (-1 or %d..%d)\n", - mlx4_log_num_mgm_entry_size, - MLX4_MIN_MGM_LOG_ENTRY_SIZE, - MLX4_MAX_MGM_LOG_ENTRY_SIZE); + pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n", + mlx4_log_num_mgm_entry_size, + MLX4_MIN_MGM_LOG_ENTRY_SIZE, + MLX4_MAX_MGM_LOG_ENTRY_SIZE); return -1; } |