author      Auke Kok <auke-jan.h.kok@intel.com>        2008-06-27 11:00:18 -0700
committer   Jeff Garzik <jgarzik@redhat.com>           2008-07-04 08:46:59 -0400
commit      652fff321490fc3fcc8e8d302826a9c2379f03d2 (patch)
tree        00e9675980cbee174305533dd3d0274715afa5e6 /drivers/net/igb/e1000_mac.c
parent      d67ce5338c7c71313f01e508d893bb8104ce459a (diff)
igb: eliminate hw from the hw_dbg macro arguments
Various cosmetic cleanups and comment fixes. Eliminate the hw argument
from the hw_dbg macro, since it is always in scope where the macro is used.
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
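The hw_dbg macro definition itself is not part of this diff (it lives in the driver's headers, not in e1000_mac.c). As a rough, standalone sketch of the mechanism the change relies on: the macro body refers to a variable named hw that is already in scope at every call site, so callers no longer pass it explicitly. The names below (hw_dbg_old, demo, the name field) are illustrative only, and plain printf stands in for the kernel's debug printout:

/* Hypothetical standalone sketch -- not the actual igb header. */
#include <stdio.h>

struct e1000_hw { const char *name; };

/* Pre-patch style: hw is passed explicitly on every call. */
#define hw_dbg_old(hw, format, arg...) \
        printf("%s: " format, (hw)->name, ##arg)

/* Post-patch style: the macro picks up "hw" from the caller's scope,
 * which works because every function in e1000_mac.c already takes a
 * "struct e1000_hw *hw" parameter. */
#define hw_dbg(format, arg...) \
        printf("%s: " format, hw->name, ##arg)

static void demo(struct e1000_hw *hw)
{
        hw_dbg_old(hw, "Clearing RAR[1-%u]\n", 15u);
        hw_dbg("Clearing RAR[1-%u]\n", 15u);
}

int main(void)
{
        struct e1000_hw hw = { .name = "igb 0000:01:00.0" };
        demo(&hw);
        return 0;
}

Because the macro body names hw directly, dropping the argument changes nothing at the call sites beyond the shorter invocation; the caller only needs a local variable (or parameter) of that name, which every function in this file has.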
Diffstat (limited to 'drivers/net/igb/e1000_mac.c')
-rw-r--r--  drivers/net/igb/e1000_mac.c  75
1 file changed, 37 insertions, 38 deletions
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 0dadcfdfa17..47ad2c4277c 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -158,12 +158,12 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
         u32 i;
         /* Setup the receive address */
-        hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
+        hw_dbg("Programming MAC Address into RAR[0]\n");
         hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
         /* Zero out the other (rar_entry_count - 1) receive addresses */
-        hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
+        hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
         for (i = 1; i < rar_count; i++) {
                 array_wr32(E1000_RA, (i << 1), 0);
                 wrfl();
@@ -193,7 +193,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
         ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
                                        &nvm_alt_mac_addr_offset);
         if (ret_val) {
-                hw_dbg(hw, "NVM Read Error\n");
+                hw_dbg("NVM Read Error\n");
                 goto out;
         }
@@ -209,7 +209,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
                 offset = nvm_alt_mac_addr_offset + (i >> 1);
                 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data);
                 if (ret_val) {
-                        hw_dbg(hw, "NVM Read Error\n");
+                        hw_dbg("NVM Read Error\n");
                         goto out;
                 }
@@ -336,7 +336,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
         }
         /* Clear the old settings from the MTA */
-        hw_dbg(hw, "Clearing MTA\n");
+        hw_dbg("Clearing MTA\n");
         for (i = 0; i < hw->mac.mta_reg_count; i++) {
                 array_wr32(E1000_MTA, i, 0);
                 wrfl();
@@ -345,7 +345,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
         /* Load any remaining multicast addresses into the hash table. */
         for (; mc_addr_count > 0; mc_addr_count--) {
                 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
-                hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
+                hw_dbg("Hash value = 0x%03X\n", hash_value);
                 igb_mta_set(hw, hash_value);
                 mc_addr_list += ETH_ALEN;
         }
@@ -540,7 +540,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
          */
         ret_val = igb_config_fc_after_link_up(hw);
         if (ret_val)
-                hw_dbg(hw, "Error configuring flow control\n");
+                hw_dbg("Error configuring flow control\n");
 out:
         return ret_val;
@@ -578,7 +578,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
          */
         hw->fc.original_type = hw->fc.type;
-        hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type);
+        hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.type);
         /* Call the necessary media_type subroutine to configure the link. */
         ret_val = hw->mac.ops.setup_physical_interface(hw);
@@ -591,8 +591,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
          * control is disabled, because it does not hurt anything to
          * initialize these registers.
          */
-        hw_dbg(hw,
-               "Initializing the Flow Control address, type and timer regs\n");
+        hw_dbg("Initializing the Flow Control address, type and timer regs\n");
         wr32(E1000_FCT, FLOW_CONTROL_TYPE);
         wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
         wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
@@ -689,7 +688,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
                                    &nvm_data);
         if (ret_val) {
-                hw_dbg(hw, "NVM Read Error\n");
+                hw_dbg("NVM Read Error\n");
                 goto out;
         }
@@ -740,7 +739,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
          *      3:  Both Rx and TX flow control (symmetric) is enabled.
          *  other:  No other values should be possible at this point.
          */
-        hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type);
+        hw_dbg("hw->fc.type = %u\n", hw->fc.type);
         switch (hw->fc.type) {
         case e1000_fc_none:
@@ -758,7 +757,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
                 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
                 break;
         default:
-                hw_dbg(hw, "Flow control param set incorrectly\n");
+                hw_dbg("Flow control param set incorrectly\n");
                 ret_val = -E1000_ERR_CONFIG;
                 goto out;
         }
@@ -801,7 +800,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
         }
         if (ret_val) {
-                hw_dbg(hw, "Error forcing flow control settings\n");
+                hw_dbg("Error forcing flow control settings\n");
                 goto out;
         }
@@ -827,7 +826,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                         goto out;
                 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
-                        hw_dbg(hw, "Copper PHY and Auto Neg "
+                        hw_dbg("Copper PHY and Auto Neg "
                                "has not completed.\n");
                         goto out;
                 }
@@ -893,11 +892,11 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                  */
                 if (hw->fc.original_type == e1000_fc_full) {
                         hw->fc.type = e1000_fc_full;
-                        hw_dbg(hw, "Flow Control = FULL.\r\n");
+                        hw_dbg("Flow Control = FULL.\r\n");
                 } else {
                         hw->fc.type = e1000_fc_rx_pause;
-                        hw_dbg(hw, "Flow Control = "
-                               "RX PAUSE frames only.\r\n");
+                        hw_dbg("Flow Control = "
+                               "RX PAUSE frames only.\r\n");
                 }
         }
         /*
@@ -913,7 +912,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                          (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                          (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                         hw->fc.type = e1000_fc_tx_pause;
-                        hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
+                        hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
                 }
                 /*
                  * For transmitting PAUSE frames ONLY.
@@ -928,7 +927,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                          !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                          (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                         hw->fc.type = e1000_fc_rx_pause;
-                        hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
+                        hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
                 }
                 /*
                  * Per the IEEE spec, at this point flow control should be
@@ -955,10 +954,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                          hw->fc.original_type == e1000_fc_tx_pause) ||
                          hw->fc.strict_ieee) {
                         hw->fc.type = e1000_fc_none;
-                        hw_dbg(hw, "Flow Control = NONE.\r\n");
+                        hw_dbg("Flow Control = NONE.\r\n");
                 } else {
                         hw->fc.type = e1000_fc_rx_pause;
-                        hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
+                        hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
                 }
                 /*
@@ -968,7 +967,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                  */
                 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
                 if (ret_val) {
-                        hw_dbg(hw, "Error getting link speed and duplex\n");
+                        hw_dbg("Error getting link speed and duplex\n");
                         goto out;
                 }
@@ -981,7 +980,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                  */
                 ret_val = igb_force_mac_fc(hw);
                 if (ret_val) {
-                        hw_dbg(hw, "Error forcing flow control settings\n");
+                        hw_dbg("Error forcing flow control settings\n");
                         goto out;
                 }
         }
@@ -1007,21 +1006,21 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
         status = rd32(E1000_STATUS);
         if (status & E1000_STATUS_SPEED_1000) {
                 *speed = SPEED_1000;
-                hw_dbg(hw, "1000 Mbs, ");
+                hw_dbg("1000 Mbs, ");
         } else if (status & E1000_STATUS_SPEED_100) {
                 *speed = SPEED_100;
-                hw_dbg(hw, "100 Mbs, ");
+                hw_dbg("100 Mbs, ");
         } else {
                 *speed = SPEED_10;
-                hw_dbg(hw, "10 Mbs, ");
+                hw_dbg("10 Mbs, ");
         }
         if (status & E1000_STATUS_FD) {
                 *duplex = FULL_DUPLEX;
-                hw_dbg(hw, "Full Duplex\n");
+                hw_dbg("Full Duplex\n");
         } else {
                 *duplex = HALF_DUPLEX;
-                hw_dbg(hw, "Half Duplex\n");
+                hw_dbg("Half Duplex\n");
         }
         return 0;
@@ -1051,7 +1050,7 @@ s32 igb_get_hw_semaphore(struct e1000_hw *hw)
         }
         if (i == timeout) {
-                hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
+                hw_dbg("Driver can't access device - SMBI bit is set.\n");
                 ret_val = -E1000_ERR_NVM;
                 goto out;
         }
@@ -1071,7 +1070,7 @@ s32 igb_get_hw_semaphore(struct e1000_hw *hw)
         if (i == timeout) {
                 /* Release semaphores */
                 igb_put_hw_semaphore(hw);
-                hw_dbg(hw, "Driver can't access the NVM\n");
+                hw_dbg("Driver can't access the NVM\n");
                 ret_val = -E1000_ERR_NVM;
                 goto out;
         }
@@ -1117,7 +1116,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
         }
         if (i == AUTO_READ_DONE_TIMEOUT) {
-                hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
+                hw_dbg("Auto read by HW from NVM has not completed.\n");
                 ret_val = -E1000_ERR_RESET;
                 goto out;
         }
@@ -1140,7 +1139,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
         ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
         if (ret_val) {
-                hw_dbg(hw, "NVM Read Error\n");
+                hw_dbg("NVM Read Error\n");
                 goto out;
         }
@@ -1322,7 +1321,7 @@ s32 igb_disable_pcie_master(struct e1000_hw *hw)
         }
         if (!timeout) {
-                hw_dbg(hw, "Master requests are pending.\n");
+                hw_dbg("Master requests are pending.\n");
                 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
                 goto out;
         }
@@ -1342,7 +1341,7 @@ void igb_reset_adaptive(struct e1000_hw *hw)
         struct e1000_mac_info *mac = &hw->mac;
         if (!mac->adaptive_ifs) {
-                hw_dbg(hw, "Not in Adaptive IFS mode!\n");
+                hw_dbg("Not in Adaptive IFS mode!\n");
                 goto out;
         }
@@ -1372,7 +1371,7 @@ void igb_update_adaptive(struct e1000_hw *hw)
         struct e1000_mac_info *mac = &hw->mac;
         if (!mac->adaptive_ifs) {
-                hw_dbg(hw, "Not in Adaptive IFS mode!\n");
+                hw_dbg("Not in Adaptive IFS mode!\n");
                 goto out;
         }
@@ -1413,7 +1412,7 @@ s32 igb_validate_mdi_setting(struct e1000_hw *hw)
         s32 ret_val = 0;
         if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
-                hw_dbg(hw, "Invalid MDI setting detected\n");
+                hw_dbg("Invalid MDI setting detected\n");
                 hw->phy.mdix = 1;
                 ret_val = -E1000_ERR_CONFIG;
                 goto out;
@@ -1452,7 +1451,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
                         break;
         }
         if (!(regvalue & E1000_GEN_CTL_READY)) {
-                hw_dbg(hw, "Reg %08x did not indicate ready\n", reg);
+                hw_dbg("Reg %08x did not indicate ready\n", reg);
                 ret_val = -E1000_ERR_PHY;
                 goto out;
         }