Diffstat (limited to 'drivers')
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 68
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c | 34
-rw-r--r-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 14
-rw-r--r-- drivers/infiniband/hw/cxgb4/mem.c | 155
-rw-r--r-- drivers/infiniband/hw/cxgb4/provider.c | 15
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 113
-rw-r--r-- drivers/infiniband/hw/cxgb4/t4.h | 11
-rw-r--r-- drivers/isdn/capi/capidrv.c | 3
-rw-r--r-- drivers/isdn/hisax/fsm.c | 2
-rw-r--r-- drivers/isdn/hisax/hfc_sx.c | 2
-rw-r--r-- drivers/net/caif/Kconfig | 7
-rw-r--r-- drivers/net/caif/Makefile | 4
-rw-r--r-- drivers/net/caif/caif_shm_u5500.c | 128
-rw-r--r-- drivers/net/caif/caif_shmcore.c | 747
-rw-r--r-- drivers/net/ethernet/adi/bfin_mac.c | 6
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 24
-rw-r--r-- drivers/net/ethernet/amd/7990.c | 2
-rw-r--r-- drivers/net/ethernet/amd/a2065.c | 1
-rw-r--r-- drivers/net/ethernet/amd/am79c961a.c | 1
-rw-r--r-- drivers/net/ethernet/amd/ariadne.c | 1
-rw-r--r-- drivers/net/ethernet/amd/atarilance.c | 6
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 1
-rw-r--r-- drivers/net/ethernet/amd/declance.c | 2
-rw-r--r-- drivers/net/ethernet/amd/mvme147.c | 4
-rw-r--r-- drivers/net/ethernet/amd/ni65.c | 2
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 1
-rw-r--r-- drivers/net/ethernet/amd/sun3lance.c | 7
-rw-r--r-- drivers/net/ethernet/amd/sunlance.c | 9
-rw-r--r-- drivers/net/ethernet/apple/macmace.c | 16
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl2.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 73
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 84
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 42
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 325
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 24
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 29
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 87
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 252
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 101
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 121
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 77
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 20
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 327
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 13
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 9
-rw-r--r-- drivers/net/ethernet/broadcom/sb1250-mac.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 389
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 18
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_ioc.c | 2
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 5
-rw-r--r-- drivers/net/ethernet/cadence/at91_ether.c | 33
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 13
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 55
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 778
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 89
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 256
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 53
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 98
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 35
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 24
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 14
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 20
-rw-r--r-- drivers/net/ethernet/cirrus/ep93xx_eth.c | 13
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 17
-rw-r--r-- drivers/net/ethernet/dec/tulip/xircom_cb.c | 9
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 7
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 31
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 10
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_hw.h | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 60
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_roce.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_roce.h | 2
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 22
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/fec.c | 30
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 17
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 13
-rw-r--r-- drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 2
-rw-r--r-- drivers/net/ethernet/i825xx/82596.c | 8
-rw-r--r-- drivers/net/ethernet/i825xx/lib82596.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 9
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.c | 9
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 7
-rw-r--r-- drivers/net/ethernet/intel/e1000e/80003es2lan.c | 131
-rw-r--r-- drivers/net/ethernet/intel/e1000e/82571.c | 38
-rw-r--r-- drivers/net/ethernet/intel/e1000e/82571.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/defines.h | 25
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 18
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 177
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 195
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.c | 10
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 268
-rw-r--r-- drivers/net/ethernet/intel/e1000e/nvm.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/param.c | 62
-rw-r--r-- drivers/net/ethernet/intel/e1000e/phy.c | 130
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 42
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 83
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 32
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 76
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 5
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_mll.c | 32
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 3
-rw-r--r-- drivers/net/ethernet/natsemi/jazzsonic.c | 12
-rw-r--r-- drivers/net/ethernet/natsemi/macsonic.c | 12
-rw-r--r-- drivers/net/ethernet/natsemi/sonic.c | 1
-rw-r--r-- drivers/net/ethernet/natsemi/xtsonic.c | 12
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 6
-rw-r--r-- drivers/net/ethernet/netx-eth.c | 2
-rw-r--r-- drivers/net/ethernet/nuvoton/w90p910_ether.c | 20
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 1
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 2
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 21
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 220
-rw-r--r-- drivers/net/ethernet/qlogic/qla3xxx.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 163
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 26
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 33
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 53
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 7
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 21
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 255
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 6
-rw-r--r-- drivers/net/ethernet/rdc/r6040.c | 12
-rw-r--r-- drivers/net/ethernet/realtek/8139too.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/atp.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 9
-rw-r--r-- drivers/net/ethernet/seeq/ether3.c | 22
-rw-r--r-- drivers/net/ethernet/seeq/sgiseeq.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 267
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 14
-rw-r--r-- drivers/net/ethernet/sfc/enum.h | 12
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/falcon.c | 17
-rw-r--r-- drivers/net/ethernet/sfc/filter.c | 249
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_pcol.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 97
-rw-r--r-- drivers/net/ethernet/sfc/nic.c | 94
-rw-r--r-- drivers/net/ethernet/sfc/ptp.c | 116
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 793
-rw-r--r-- drivers/net/ethernet/sfc/siena.c | 25
-rw-r--r-- drivers/net/ethernet/sgi/meth.c | 5
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 7
-rw-r--r-- drivers/net/ethernet/smsc/smc9194.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc9420.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 24
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.c | 4
-rw-r--r-- drivers/net/ethernet/sun/sunhme.c | 13
-rw-r--r-- drivers/net/ethernet/sun/sunqe.c | 5
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 5
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 159
-rw-r--r-- drivers/net/ethernet/ti/tlan.c | 4
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net.c | 3
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 21
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 27
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 25
-rw-r--r-- drivers/net/ethernet/xircom/xirc2ps_cs.c | 1
-rw-r--r-- drivers/net/fddi/defxx.c | 9
-rw-r--r-- drivers/net/ieee802154/mrf24j40.c | 15
-rw-r--r-- drivers/net/irda/ali-ircc.c | 6
-rw-r--r-- drivers/net/irda/au1k_ir.c | 5
-rw-r--r-- drivers/net/irda/bfin_sir.c | 3
-rw-r--r-- drivers/net/irda/nsc-ircc.c | 6
-rw-r--r-- drivers/net/irda/pxaficp_ir.c | 4
-rw-r--r-- drivers/net/irda/smsc-ircc2.c | 17
-rw-r--r-- drivers/net/irda/via-ircc.c | 6
-rw-r--r-- drivers/net/irda/w83977af_ir.c | 7
-rw-r--r-- drivers/net/phy/lxt.c | 2
-rw-r--r-- drivers/net/phy/marvell.c | 127
-rw-r--r-- drivers/net/phy/micrel.c | 41
-rw-r--r-- drivers/net/phy/phy.c | 16
-rw-r--r-- drivers/net/phy/vitesse.c | 3
-rw-r--r-- drivers/net/team/Kconfig | 12
-rw-r--r-- drivers/net/team/Makefile | 1
-rw-r--r-- drivers/net/team/team.c | 25
-rw-r--r-- drivers/net/team/team_mode_broadcast.c | 14
-rw-r--r-- drivers/net/team/team_mode_random.c | 71
-rw-r--r-- drivers/net/team/team_mode_roundrobin.c | 36
-rw-r--r-- drivers/net/tun.c | 2
-rw-r--r-- drivers/net/vxlan.c | 298
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx.c | 2
-rw-r--r-- drivers/net/wireless/b43/dma.c | 9
-rw-r--r-- drivers/net/wireless/b43legacy/dma.c | 8
-rw-r--r-- drivers/net/wireless/iwlegacy/3945.c | 4
-rw-r--r-- drivers/net/wireless/iwlegacy/4965-mac.c | 4
-rw-r--r-- drivers/net/wireless/iwlegacy/common.c | 15
-rw-r--r-- drivers/net/wireless/iwlegacy/common.h | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/tx.c | 4
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00pci.c | 4
-rw-r--r-- drivers/scsi/csiostor/Makefile | 3
-rw-r--r-- drivers/scsi/csiostor/csio_hw.c | 559
-rw-r--r-- drivers/scsi/csiostor/csio_hw.h | 47
-rw-r--r-- drivers/scsi/csiostor/csio_hw_chip.h | 175
-rw-r--r-- drivers/scsi/csiostor/csio_hw_t4.c | 403
-rw-r--r-- drivers/scsi/csiostor/csio_hw_t5.c | 397
-rw-r--r-- drivers/scsi/csiostor/csio_init.c | 48
-rw-r--r-- drivers/scsi/csiostor/csio_init.h | 29
-rw-r--r-- drivers/scsi/csiostor/csio_lnode.h | 2
-rw-r--r-- drivers/scsi/csiostor/csio_rnode.c | 10
-rw-r--r-- drivers/scsi/csiostor/csio_rnode.h | 2
-rw-r--r-- drivers/scsi/csiostor/csio_wr.c | 60
228 files changed, 7593 insertions, 4478 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index a3fde52840c..65c30ea8c1a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -511,12 +511,16 @@ static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
+ struct cpl_t5_act_open_req *t5_req;
struct sk_buff *skb;
u64 opt0;
u32 opt2;
unsigned int mtu_idx;
int wscale;
- int wrlen = roundup(sizeof *req, 16);
+ int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
+ sizeof(struct cpl_act_open_req) :
+ sizeof(struct cpl_t5_act_open_req);
+ int wrlen = roundup(size, 16);
PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
@@ -552,17 +556,36 @@ static int send_connect(struct c4iw_ep *ep)
opt2 |= WND_SCALE_EN(1);
t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
- req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = cpu_to_be32(
- MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
- req->local_port = ep->com.local_addr.sin_port;
- req->peer_port = ep->com.remote_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
- req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
- req->opt2 = cpu_to_be32(opt2);
+ if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
+ req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ ((ep->rss_qid << 14) | ep->atid)));
+ req->local_port = ep->com.local_addr.sin_port;
+ req->peer_port = ep->com.remote_addr.sin_port;
+ req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+ req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = cpu_to_be32(select_ntuple(ep->com.dev,
+ ep->dst, ep->l2t));
+ req->opt2 = cpu_to_be32(opt2);
+ } else {
+ t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
+ INIT_TP_WR(t5_req, 0);
+ OPCODE_TID(t5_req) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ ((ep->rss_qid << 14) | ep->atid)));
+ t5_req->local_port = ep->com.local_addr.sin_port;
+ t5_req->peer_port = ep->com.remote_addr.sin_port;
+ t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+ t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+ t5_req->opt0 = cpu_to_be64(opt0);
+ t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+ select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
+ t5_req->opt2 = cpu_to_be32(opt2);
+ }
+
set_bit(ACT_OPEN_REQ, &ep->com.history);
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
@@ -1676,9 +1699,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case CPL_ERR_CONN_TIMEDOUT:
break;
case CPL_ERR_TCAM_FULL:
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.tcam_full++;
+ mutex_unlock(&dev->rdev.stats.lock);
if (dev->rdev.lldi.enable_fw_ofld_conn) {
- mutex_lock(&dev->rdev.stats.lock);
- dev->rdev.stats.tcam_full++;
- mutex_unlock(&dev->rdev.stats.lock);
send_fw_act_open_req(ep,
GET_TID_TID(GET_AOPEN_ATID(
@@ -2875,12 +2898,14 @@ static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
{
u32 l2info;
- u16 vlantag, len, hdr_len;
+ u16 vlantag, len, hdr_len, eth_hdr_len;
u8 intf;
struct cpl_rx_pkt *cpl = cplhdr(skb);
struct cpl_pass_accept_req *req;
struct tcp_options_received tmp_opt;
+ struct c4iw_dev *dev;
+ dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
/* Store values from cpl_rx_pkt in temporary location. */
vlantag = (__force u16) cpl->vlan;
len = (__force u16) cpl->len;
@@ -2896,7 +2921,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
*/
memset(&tmp_opt, 0, sizeof(tmp_opt));
tcp_clear_options(&tmp_opt);
- tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
+ tcp_parse_options(skb, &tmp_opt, 0, NULL);
req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
@@ -2904,14 +2929,16 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
V_SYN_MAC_IDX(G_RX_MACIDX(
(__force int) htonl(l2info))) |
F_SYN_XACT_MATCH);
+ eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+ G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
+ G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
(__force int) htonl(l2info))) |
V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
(__force int) htons(hdr_len))) |
V_IP_HDR_LEN(G_RX_IPHDR_LEN(
(__force int) htons(hdr_len))) |
- V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
- (__force int) htonl(l2info))));
+ V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
req->vlan = (__force __be16) vlantag;
req->len = (__force __be16) len;
req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
@@ -2999,7 +3026,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
u16 window;
struct port_info *pi;
struct net_device *pdev;
- u16 rss_qid;
+ u16 rss_qid, eth_hdr_len;
int step;
u32 tx_chan;
struct neighbour *neigh;
@@ -3028,7 +3055,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
- if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+ eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+ G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
+ G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+ if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
} else {
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80069ad595c..ae656016e1a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -41,10 +41,20 @@
#define DRV_VERSION "0.1"
MODULE_AUTHOR("Steve Wise");
-MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
+MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
+static int allow_db_fc_on_t5;
+module_param(allow_db_fc_on_t5, int, 0644);
+MODULE_PARM_DESC(allow_db_fc_on_t5,
+ "Allow DB Flow Control on T5 (default = 0)");
+
+static int allow_db_coalescing_on_t5;
+module_param(allow_db_coalescing_on_t5, int, 0644);
+MODULE_PARM_DESC(allow_db_coalescing_on_t5,
+ "Allow DB Coalescing on T5 (default = 0)");
+
struct uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
@@ -614,7 +624,7 @@ static int rdma_supported(const struct cxgb4_lld_info *infop)
{
return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
- infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
+ infop->vr->cq.size > 0;
}
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -627,6 +637,22 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
pci_name(infop->pdev));
return ERR_PTR(-ENOSYS);
}
+ if (!ocqp_supported(infop))
+ pr_info("%s: On-Chip Queues not supported on this device.\n",
+ pci_name(infop->pdev));
+
+ if (!is_t4(infop->adapter_type)) {
+ if (!allow_db_fc_on_t5) {
+ db_fc_threshold = 100000;
+ pr_info("DB Flow Control Disabled.\n");
+ }
+
+ if (!allow_db_coalescing_on_t5) {
+ db_coalescing_threshold = -1;
+ pr_info("DB Coalescing Disabled.\n");
+ }
+ }
+
devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
if (!devp) {
printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -678,8 +704,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
int i;
if (!vers_printed++)
- printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
- DRV_VERSION);
+ pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
+ DRV_VERSION);
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx) {
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7eec5e13fa8..485183ad34c 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -162,7 +162,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}
-#define C4IW_WR_TO (10*HZ)
+#define C4IW_WR_TO (30*HZ)
struct c4iw_wr_wait {
struct completion completion;
@@ -369,7 +369,6 @@ struct c4iw_fr_page_list {
DEFINE_DMA_UNMAP_ADDR(mapping);
dma_addr_t dma_addr;
struct c4iw_dev *dev;
- int size;
};
static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
@@ -817,6 +816,15 @@ static inline int compute_wscale(int win)
return wscale;
}
+static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
+ return infop->vr->ocq.size > 0;
+#else
+ return 0;
+#endif
+}
+
u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
@@ -930,6 +938,8 @@ extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;
extern int db_fc_threshold;
+extern int db_coalescing_threshold;
+extern int use_dsgl;
#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 903a92d6f91..4cb8eb24497 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -30,16 +30,76 @@
* SOFTWARE.
*/
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
#include "iw_cxgb4.h"
+int use_dsgl = 1;
+module_param(use_dsgl, int, 0644);
+MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");
+
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
+#define T4_ULPTX_MAX_DMA 1024
+#define C4IW_INLINE_THRESHOLD 128
-static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data)
+static int inline_threshold = C4IW_INLINE_THRESHOLD;
+module_param(inline_threshold, int, 0644);
+MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
+
+static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
+ u32 len, dma_addr_t data, int wait)
+{
+ struct sk_buff *skb;
+ struct ulp_mem_io *req;
+ struct ulptx_sgl *sgl;
+ u8 wr_len;
+ int ret = 0;
+ struct c4iw_wr_wait wr_wait;
+
+ addr &= 0x7FFFFFF;
+
+ if (wait)
+ c4iw_init_wr_wait(&wr_wait);
+ wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+ memset(req, 0, wr_len);
+ INIT_ULPTX_WR(req, wr_len, 0, 0);
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
+ (wait ? FW_WR_COMPL(1) : 0));
+ req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
+ req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+ req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+ req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
+ req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
+
+ sgl = (struct ulptx_sgl *)(req + 1);
+ sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE(1));
+ sgl->len0 = cpu_to_be32(len);
+ sgl->addr0 = cpu_to_be64(data);
+
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ return ret;
+ if (wait)
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+ return ret;
+}
+
+static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data)
{
struct sk_buff *skb;
struct ulp_mem_io *req;
@@ -47,6 +107,12 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
+ __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+
+ if (is_t4(rdev->lldi.adapter_type))
+ cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
+ else
+ cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
addr &= 0x7FFFFFF;
PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -77,7 +143,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
req->wr.wr_mid = cpu_to_be32(
FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
- req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
+ req->cmd = cmd;
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
@@ -107,6 +173,67 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
return ret;
}
+int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+{
+ u32 remain = len;
+ u32 dmalen;
+ int ret = 0;
+ dma_addr_t daddr;
+ dma_addr_t save;
+
+ daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
+ return -1;
+ save = daddr;
+
+ while (remain > inline_threshold) {
+ if (remain < T4_ULPTX_MAX_DMA) {
+ if (remain & ~T4_ULPTX_MIN_IO)
+ dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
+ else
+ dmalen = remain;
+ } else
+ dmalen = T4_ULPTX_MAX_DMA;
+ remain -= dmalen;
+ ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
+ !remain);
+ if (ret)
+ goto out;
+ addr += dmalen >> 5;
+ data += dmalen;
+ daddr += dmalen;
+ }
+ if (remain)
+ ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+out:
+ dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
+ return ret;
+}
+
+/*
+ * write len bytes of data into addr (32B aligned address)
+ * If data is NULL, clear len byte of memory to zero.
+ */
+static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data)
+{
+ if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
+ if (len > inline_threshold) {
+ if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+ printk_ratelimited(KERN_WARNING
+ "%s: dma map"
+ " failure (non fatal)\n",
+ pci_name(rdev->lldi.pdev));
+ return _c4iw_write_mem_inline(rdev, addr, len,
+ data);
+ } else
+ return 0;
+ } else
+ return _c4iw_write_mem_inline(rdev, addr, len, data);
+ } else
+ return _c4iw_write_mem_inline(rdev, addr, len, data);
+}
+
/*
* Build and write a TPT entry.
* IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
@@ -760,19 +887,23 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
struct c4iw_fr_page_list *c4pl;
struct c4iw_dev *dev = to_c4iw_dev(device);
dma_addr_t dma_addr;
- int size = sizeof *c4pl + page_list_len * sizeof(u64);
+ int pll_len = roundup(page_list_len * sizeof(u64), 32);
- c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
- &dma_addr, GFP_KERNEL);
+ c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
if (!c4pl)
return ERR_PTR(-ENOMEM);
+ c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
+ pll_len, &dma_addr,
+ GFP_KERNEL);
+ if (!c4pl->ibpl.page_list) {
+ kfree(c4pl);
+ return ERR_PTR(-ENOMEM);
+ }
dma_unmap_addr_set(c4pl, mapping, dma_addr);
c4pl->dma_addr = dma_addr;
c4pl->dev = dev;
- c4pl->size = size;
- c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
- c4pl->ibpl.max_page_list_len = page_list_len;
+ c4pl->ibpl.max_page_list_len = pll_len;
return &c4pl->ibpl;
}
@@ -781,8 +912,10 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
- dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
- c4pl, dma_unmap_addr(c4pl, mapping));
+ dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
+ c4pl->ibpl.max_page_list_len,
+ c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
+ kfree(c4pl);
}
int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index e084fdc6da7..7e94c9a656a 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -162,8 +162,14 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
*/
if (addr >= rdev->oc_mw_pa)
vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ else {
+ if (is_t5(rdev->lldi.adapter_type))
+ vma->vm_page_prot =
+ t4_pgprot_wc(vma->vm_page_prot);
+ else
+ vma->vm_page_prot =
+ pgprot_noncached(vma->vm_page_prot);
+ }
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
@@ -263,7 +269,7 @@ static int c4iw_query_device(struct ib_device *ibdev,
dev = to_c4iw_dev(ibdev);
memset(props, 0, sizeof *props);
memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
- props->hw_ver = dev->rdev.lldi.adapter_type;
+ props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
props->fw_ver = dev->rdev.lldi.fw_vers;
props->device_cap_flags = dev->device_cap_flags;
props->page_size_cap = T4_PAGESIZE_MASK;
@@ -346,7 +352,8 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
PDBG("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
+ return sprintf(buf, "%d\n",
+ CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 17ba4f8bc12..9fe6f1e8437 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -42,10 +42,21 @@ static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
-int db_fc_threshold = 2000;
+int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
-MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
- "db flow control mode (default = 2000)");
+MODULE_PARM_DESC(db_fc_threshold,
+ "QP count/threshold that triggers"
+ " automatic db flow control mode (default = 1000)");
+
+int db_coalescing_threshold;
+module_param(db_coalescing_threshold, int, 0644);
+MODULE_PARM_DESC(db_coalescing_threshold,
+ "QP count/threshold that triggers"
+ " disabling db coalescing (default = 0)");
+
+static int max_fr_immd = T4_MAX_FR_IMMD;
+module_param(max_fr_immd, int, 0644);
+MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
@@ -76,7 +87,7 @@ static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
- if (!ocqp_support || !t4_ocqp_supported())
+ if (!ocqp_support || !ocqp_supported(&rdev->lldi))
return -ENOSYS;
sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
if (!sq->dma_addr)
@@ -129,7 +140,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
int wr_len;
struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
- int ret;
+ int ret = 0;
int eqsize;
wq->sq.qid = c4iw_get_qpid(rdev, uctx);
@@ -169,17 +180,14 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
}
if (user) {
- ret = alloc_oc_sq(rdev, &wq->sq);
- if (ret)
+ if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
goto free_hwaddr;
-
- ret = alloc_host_sq(rdev, &wq->sq);
- if (ret)
- goto free_sq;
- } else
+ } else {
ret = alloc_host_sq(rdev, &wq->sq);
if (ret)
goto free_hwaddr;
+ }
+
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
@@ -532,7 +540,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
}
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+ struct ib_send_wr *wr, u8 *len16, u8 t5dev)
{
struct fw_ri_immd *imdp;
@@ -554,28 +562,51 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
0xffffffff);
- WARN_ON(pbllen > T4_MAX_FR_IMMD);
- imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
- imdp->op = FW_RI_DATA_IMMD;
- imdp->r1 = 0;
- imdp->r2 = 0;
- imdp->immdlen = cpu_to_be32(pbllen);
- p = (__be64 *)(imdp + 1);
- rem = pbllen;
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
- *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
- rem -= sizeof *p;
- if (++p == (__be64 *)&sq->queue[sq->size])
- p = (__be64 *)sq->queue;
- }
- BUG_ON(rem < 0);
- while (rem) {
- *p = 0;
- rem -= sizeof *p;
- if (++p == (__be64 *)&sq->queue[sq->size])
- p = (__be64 *)sq->queue;
+
+ if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+ struct c4iw_fr_page_list *c4pl =
+ to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+ struct fw_ri_dsgl *sglp;
+
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
+ cpu_to_be64((u64)
+ wr->wr.fast_reg.page_list->page_list[i]);
+ }
+
+ sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+ sglp->op = FW_RI_DATA_DSGL;
+ sglp->r1 = 0;
+ sglp->nsge = cpu_to_be16(1);
+ sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
+ sglp->len0 = cpu_to_be32(pbllen);
+
+ *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
+ } else {
+ imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+ imdp->op = FW_RI_DATA_IMMD;
+ imdp->r1 = 0;
+ imdp->r2 = 0;
+ imdp->immdlen = cpu_to_be32(pbllen);
+ p = (__be64 *)(imdp + 1);
+ rem = pbllen;
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ *p = cpu_to_be64(
+ (u64)wr->wr.fast_reg.page_list->page_list[i]);
+ rem -= sizeof(*p);
+ if (++p == (__be64 *)&sq->queue[sq->size])
+ p = (__be64 *)sq->queue;
+ }
+ BUG_ON(rem < 0);
+ while (rem) {
+ *p = 0;
+ rem -= sizeof(*p);
+ if (++p == (__be64 *)&sq->queue[sq->size])
+ p = (__be64 *)sq->queue;
+ }
+ *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
+ + pbllen, 16);
}
- *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
return 0;
}
@@ -676,7 +707,10 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_FAST_REG_MR:
fw_opcode = FW_RI_FR_NSMR_WR;
swsqe->opcode = FW_RI_FAST_REGISTER;
- err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
+ err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
+ is_t5(
+ qhp->rhp->rdev.lldi.adapter_type) ?
+ 1 : 0);
break;
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
@@ -1448,6 +1482,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
rhp->db_state = NORMAL;
idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
}
+ if (db_coalescing_threshold >= 0)
+ if (rhp->qpcnt <= db_coalescing_threshold)
+ cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
spin_unlock_irq(&rhp->lock);
atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
@@ -1559,11 +1596,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
spin_lock_irq(&rhp->lock);
if (rhp->db_state != NORMAL)
t4_disable_wq_db(&qhp->wq);
- if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
+ rhp->qpcnt++;
+ if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
rhp->rdev.stats.db_state_transitions++;
rhp->db_state = FLOW_CONTROL;
idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
}
+ if (db_coalescing_threshold >= 0)
+ if (rhp->qpcnt > db_coalescing_threshold)
+ cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
spin_unlock_irq(&rhp->lock);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 16f26ab2930..ebcb03bd1b7 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -84,7 +84,7 @@ struct t4_status_page {
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
sizeof(struct fw_ri_immd)) & ~31UL)
-#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
+#define T4_MAX_FR_DEPTH (1024 / sizeof(u64))
#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
@@ -280,15 +280,6 @@ static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
#endif
}
-static inline int t4_ocqp_supported(void)
-{
-#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
- return 1;
-#else
- return 0;
-#endif
-}
-
enum {
T4_SQ_ONCHIP = (1<<0),
};
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 832bc807ed2..cc9f1927a32 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -469,8 +469,7 @@ static int capidrv_add_ack(struct capidrv_ncci *nccip,
{
struct ncci_datahandle_queue *n, **pp;
- n = (struct ncci_datahandle_queue *)
- kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
+ n = kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
if (!n) {
printk(KERN_ERR "capidrv: kmalloc ncci_datahandle failed\n");
return -1;
diff --git a/drivers/isdn/hisax/fsm.c b/drivers/isdn/hisax/fsm.c
index 1bb291021fd..c7a94713e9e 100644
--- a/drivers/isdn/hisax/fsm.c
+++ b/drivers/isdn/hisax/fsm.c
@@ -26,7 +26,7 @@ FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount)
{
int i;
- fsm->jumpmatrix = (FSMFNPTR *)
+ fsm->jumpmatrix =
kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL);
if (!fsm->jumpmatrix)
return -ENOMEM;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 90f34ae2b80..dc4574f735e 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1479,7 +1479,7 @@ int setup_hfcsx(struct IsdnCard *card)
release_region(cs->hw.hfcsx.base, 2);
return (0);
}
- if (!(cs->hw.hfcsx.extra = (void *)
+ if (!(cs->hw.hfcsx.extra =
kmalloc(sizeof(struct hfcsx_extra), GFP_ATOMIC))) {
release_region(cs->hw.hfcsx.base, 2);
printk(KERN_WARNING "HFC-SX: unable to allocate memory\n");
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 60c2142373c..a966128c2a7 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -32,13 +32,6 @@ config CAIF_SPI_SYNC
help to synchronize to the next transfer in case of over or under-runs.
This option also needs to be enabled on the modem.
-config CAIF_SHM
- tristate "CAIF shared memory protocol driver"
- depends on CAIF && U5500_MBOX
- default n
- ---help---
- The CAIF shared memory protocol driver for the STE UX5500 platform.
-
config CAIF_HSI
tristate "CAIF HSI transport driver"
depends on CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 91dff861560..15a9d2fc753 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -7,9 +7,5 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
cfspi_slave-objs := caif_spi.o caif_spi_slave.o
obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
-# Shared memory
-caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
-obj-$(CONFIG_CAIF_SHM) += caif_shm.o
-
# HSI interface
obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
deleted file mode 100644
index 89d76b7b325..00000000000
--- a/drivers/net/caif/caif_shm_u5500.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
- * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <mach/mbox-db5500.h>
-#include <net/caif/caif_shm.h>
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
-
-#define MAX_SHM_INSTANCES 1
-
-enum {
- MBX_ACC0,
- MBX_ACC1,
- MBX_DSP
-};
-
-static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
-
-static unsigned int shm_start;
-static unsigned int shm_size;
-
-module_param(shm_size, uint , 0440);
-MODULE_PARM_DESC(shm_total_size, "Start of SHM shared memory");
-
-module_param(shm_start, uint , 0440);
-MODULE_PARM_DESC(shm_total_start, "Total Size of SHM shared memory");
-
-static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
-{
- /* Always block until msg is written successfully */
- mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
- return 0;
-}
-
-static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
- void *pshm_drv)
-{
- /*
- * For UX5500, we have only 1 SHM instance which uses MBX0
- * for communication with the peer modem
- */
- pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
-
- if (!pshm_dev->hmbx)
- return -ENODEV;
- else
- return 0;
-}
-
-static int __init caif_shmdev_init(void)
-{
- int i, result;
-
- /* Loop is currently overkill, there is only one instance */
- for (i = 0; i < MAX_SHM_INSTANCES; i++) {
-
- shmdev_lyr[i].shm_base_addr = shm_start;
- shmdev_lyr[i].shm_total_sz = shm_size;
-
- if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
- || (shmdev_lyr[i].shm_total_sz <= 0)) {
- pr_warn("ERROR,"
- "Shared memory Address and/or Size incorrect"
- ", Bailing out ...\n");
- result = -EINVAL;
- goto clean;
- }
-
- pr_info("SHM AREA (instance %d) STARTS"
- " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
-
- shmdev_lyr[i].shm_id = i;
- shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
- shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
-
- /*
- * Finally, CAIF core module is called with details in place:
- * 1. SHM base address
- * 2. SHM size
- * 3. MBX handle
- */
- result = caif_shmcore_probe(&shmdev_lyr[i]);
- if (result) {
- pr_warn("ERROR[%d],"
- "Could not probe SHM core (instance %d)"
- " Bailing out ...\n", result, i);
- goto clean;
- }
- }
-
- return 0;
-
-clean:
- /*
- * For now, we assume that even if one instance of SHM fails, we bail
- * out of the driver support completely. For this, we need to release
- * any memory allocated and unregister any instance of SHM net device.
- */
- for (i = 0; i < MAX_SHM_INSTANCES; i++) {
- if (shmdev_lyr[i].pshm_netdev)
- unregister_netdev(shmdev_lyr[i].pshm_netdev);
- }
- return result;
-}
-
-static void __exit caif_shmdev_exit(void)
-{
- int i;
-
- for (i = 0; i < MAX_SHM_INSTANCES; i++) {
- caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
- kfree((void *)shmdev_lyr[i].shm_base_addr);
- }
-
-}
-
-module_init(caif_shmdev_init);
-module_exit(caif_shmdev_exit);
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
deleted file mode 100644
index bce8bac311c..00000000000
--- a/drivers/net/caif/caif_shmcore.c
+++ /dev/null
@@ -1,747 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
- * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
- * Daniel Martensson / daniel.martensson@stericsson.com
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
-
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/list.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/io.h>
-
-#include <net/caif/caif_device.h>
-#include <net/caif/caif_shm.h>
-
-#define NR_TX_BUF 6
-#define NR_RX_BUF 6
-#define TX_BUF_SZ 0x2000
-#define RX_BUF_SZ 0x2000
-
-#define CAIF_NEEDED_HEADROOM 32
-
-#define CAIF_FLOW_ON 1
-#define CAIF_FLOW_OFF 0
-
-#define LOW_WATERMARK 3
-#define HIGH_WATERMARK 4
-
-/* Maximum number of CAIF buffers per shared memory buffer. */
-#define SHM_MAX_FRMS_PER_BUF 10
-
-/*
- * Size in bytes of the descriptor area
- * (With end of descriptor signalling)
- */
-#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
- sizeof(struct shm_pck_desc))
-
-/*
- * Offset to the first CAIF frame within a shared memory buffer.
- * Aligned on 32 bytes.
- */
-#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
-
-/* Number of bytes for CAIF shared memory header. */
-#define SHM_HDR_LEN 1
-
-/* Number of padding bytes for the complete CAIF frame. */
-#define SHM_FRM_PAD_LEN 4
-
-#define CAIF_MAX_MTU 4096
-
-#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
-#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
-
-#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
-#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
-
-#define SHM_FULL_MASK (0x0F << 0)
-#define SHM_EMPTY_MASK (0x0F << 4)
-
-struct shm_pck_desc {
- /*
- * Offset from start of shared memory area to start of
- * shared memory CAIF frame.
- */
- u32 frm_ofs;
- u32 frm_len;
-};
-
-struct buf_list {
- unsigned char *desc_vptr;
- u32 phy_addr;
- u32 index;
- u32 len;
- u32 frames;
- u32 frm_ofs;
- struct list_head list;
-};
-
-struct shm_caif_frm {
- /* Number of bytes of padding before the CAIF frame. */
- u8 hdr_ofs;
-};
-
-struct shmdrv_layer {
- /* caif_dev_common must always be first in the structure*/
- struct caif_dev_common cfdev;
-
- u32 shm_tx_addr;
- u32 shm_rx_addr;
- u32 shm_base_addr;
- u32 tx_empty_available;
- spinlock_t lock;
-
- struct list_head tx_empty_list;
- struct list_head tx_pend_list;
- struct list_head tx_full_list;
- struct list_head rx_empty_list;
- struct list_head rx_pend_list;
- struct list_head rx_full_list;
-
- struct workqueue_struct *pshm_tx_workqueue;
- struct workqueue_struct *pshm_rx_workqueue;
-
- struct work_struct shm_tx_work;
- struct work_struct shm_rx_work;
-
- struct sk_buff_head sk_qhead;
- struct shmdev_layer *pshm_dev;
-};
-
-static int shm_netdev_open(struct net_device *shm_netdev)
-{
- netif_wake_queue(shm_netdev);
- return 0;
-}
-
-static int shm_netdev_close(struct net_device *shm_netdev)
-{
- netif_stop_queue(shm_netdev);
- return 0;
-}
-
-int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
-{
- struct buf_list *pbuf;
- struct shmdrv_layer *pshm_drv;
- struct list_head *pos;
- u32 avail_emptybuff = 0;
- unsigned long flags = 0;
-
- pshm_drv = priv;
-
- /* Check for received buffers. */
- if (mbx_msg & SHM_FULL_MASK) {
- int idx;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check whether we have any outstanding buffers. */
- if (list_empty(&pshm_drv->rx_empty_list)) {
-
- /* Release spin lock. */
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* We print even in IRQ context... */
- pr_warn("No empty Rx buffers to fill: "
- "mbx_msg:%x\n", mbx_msg);
-
- /* Bail out. */
- goto err_sync;
- }
-
- pbuf =
- list_entry(pshm_drv->rx_empty_list.next,
- struct buf_list, list);
- idx = pbuf->index;
-
- /* Check buffer synchronization. */
- if (idx != SHM_GET_FULL(mbx_msg)) {
-
- /* We print even in IRQ context... */
- pr_warn(
- "phyif_shm_mbx_msg_cb: RX full out of sync:"
- " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
- idx, mbx_msg, SHM_GET_FULL(mbx_msg));
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Bail out. */
- goto err_sync;
- }
-
- list_del_init(&pbuf->list);
- list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Schedule RX work queue. */
- if (!work_pending(&pshm_drv->shm_rx_work))
- queue_work(pshm_drv->pshm_rx_workqueue,
- &pshm_drv->shm_rx_work);
- }
-
- /* Check for emptied buffers. */
- if (mbx_msg & SHM_EMPTY_MASK) {
- int idx;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check whether we have any outstanding buffers. */
- if (list_empty(&pshm_drv->tx_full_list)) {
-
- /* We print even in IRQ context... */
- pr_warn("No TX to empty: msg:%x\n", mbx_msg);
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Bail out. */
- goto err_sync;
- }
-
- pbuf =
- list_entry(pshm_drv->tx_full_list.next,
- struct buf_list, list);
- idx = pbuf->index;
-
- /* Check buffer synchronization. */
- if (idx != SHM_GET_EMPTY(mbx_msg)) {
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* We print even in IRQ context... */
- pr_warn("TX empty "
- "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
-
- /* Bail out. */
- goto err_sync;
- }
- list_del_init(&pbuf->list);
-
- /* Reset buffer parameters. */
- pbuf->frames = 0;
- pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
-
- list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
-
- /* Check the available no. of buffers in the empty list */
- list_for_each(pos, &pshm_drv->tx_empty_list)
- avail_emptybuff++;
-
- /* Check whether we have to wake up the transmitter. */
- if ((avail_emptybuff > HIGH_WATERMARK) &&
- (!pshm_drv->tx_empty_available)) {
- pshm_drv->tx_empty_available = 1;
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- pshm_drv->cfdev.flowctrl
- (pshm_drv->pshm_dev->pshm_netdev,
- CAIF_FLOW_ON);
-
-
- /* Schedule the work queue. if required */
- if (!work_pending(&pshm_drv->shm_tx_work))
- queue_work(pshm_drv->pshm_tx_workqueue,
- &pshm_drv->shm_tx_work);
- } else
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- }
-
- return 0;
-
-err_sync:
- return -EIO;
-}
-
-static void shm_rx_work_func(struct work_struct *rx_work)
-{
- struct shmdrv_layer *pshm_drv;
- struct buf_list *pbuf;
- unsigned long flags = 0;
- struct sk_buff *skb;
- char *p;
- int ret;
-
- pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
-
- while (1) {
-
- struct shm_pck_desc *pck_desc;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check for received buffers. */
- if (list_empty(&pshm_drv->rx_full_list)) {
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- break;
- }
-
- pbuf =
- list_entry(pshm_drv->rx_full_list.next, struct buf_list,
- list);
- list_del_init(&pbuf->list);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Retrieve pointer to start of the packet descriptor area. */
- pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
-
- /*
- * Check whether descriptor contains a CAIF shared memory
- * frame.
- */
- while (pck_desc->frm_ofs) {
- unsigned int frm_buf_ofs;
- unsigned int frm_pck_ofs;
- unsigned int frm_pck_len;
- /*
- * Check whether offset is within buffer limits
- * (lower).
- */
- if (pck_desc->frm_ofs <
- (pbuf->phy_addr - pshm_drv->shm_base_addr))
- break;
- /*
- * Check whether offset is within buffer limits
- * (higher).
- */
- if (pck_desc->frm_ofs >
- ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
- pbuf->len))
- break;
-
- /* Calculate offset from start of buffer. */
- frm_buf_ofs =
- pck_desc->frm_ofs - (pbuf->phy_addr -
- pshm_drv->shm_base_addr);
-
- /*
- * Calculate offset and length of CAIF packet while
- * taking care of the shared memory header.
- */
- frm_pck_ofs =
- frm_buf_ofs + SHM_HDR_LEN +
- (*(pbuf->desc_vptr + frm_buf_ofs));
- frm_pck_len =
- (pck_desc->frm_len - SHM_HDR_LEN -
- (*(pbuf->desc_vptr + frm_buf_ofs)));
-
- /* Check whether CAIF packet is within buffer limits */
- if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
- break;
-
- /* Get a suitable CAIF packet and copy in data. */
- skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
- frm_pck_len + 1);
-
- if (skb == NULL) {
- pr_info("OOM: Try next frame in descriptor\n");
- break;
- }
-
- p = skb_put(skb, frm_pck_len);
- memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
- skb->dev = pshm_drv->pshm_dev->pshm_netdev;
-
- /* Push received packet up the stack. */
- ret = netif_rx_ni(skb);
-
- if (!ret) {
- pshm_drv->pshm_dev->pshm_netdev->stats.
- rx_packets++;
- pshm_drv->pshm_dev->pshm_netdev->stats.
- rx_bytes += pck_desc->frm_len;
- } else
- ++pshm_drv->pshm_dev->pshm_netdev->stats.
- rx_dropped;
- /* Move to next packet descriptor. */
- pck_desc++;
- }
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
- list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- }
-
- /* Schedule the work queue. if required */
- if (!work_pending(&pshm_drv->shm_tx_work))
- queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
-
-}
-
-static void shm_tx_work_func(struct work_struct *tx_work)
-{
- u32 mbox_msg;
- unsigned int frmlen, avail_emptybuff, append = 0;
- unsigned long flags = 0;
- struct buf_list *pbuf = NULL;
- struct shmdrv_layer *pshm_drv;
- struct shm_caif_frm *frm;
- struct sk_buff *skb;
- struct shm_pck_desc *pck_desc;
- struct list_head *pos;
-
- pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
-
- do {
- /* Initialize mailbox message. */
- mbox_msg = 0x00;
- avail_emptybuff = 0;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check for pending receive buffers. */
- if (!list_empty(&pshm_drv->rx_pend_list)) {
-
- pbuf = list_entry(pshm_drv->rx_pend_list.next,
- struct buf_list, list);
-
- list_del_init(&pbuf->list);
- list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
- /*
- * Value index is never changed,
- * so read access should be safe.
- */
- mbox_msg |= SHM_SET_EMPTY(pbuf->index);
- }
-
- skb = skb_peek(&pshm_drv->sk_qhead);
-
- if (skb == NULL)
- goto send_msg;
- /* Check the available no. of buffers in the empty list */
- list_for_each(pos, &pshm_drv->tx_empty_list)
- avail_emptybuff++;
-
- if ((avail_emptybuff < LOW_WATERMARK) &&
- pshm_drv->tx_empty_available) {
- /* Update blocking condition. */
- pshm_drv->tx_empty_available = 0;
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- pshm_drv->cfdev.flowctrl
- (pshm_drv->pshm_dev->pshm_netdev,
- CAIF_FLOW_OFF);
- spin_lock_irqsave(&pshm_drv->lock, flags);
- }
- /*
- * We simply return back to the caller if we do not have space
- * either in Tx pending list or Tx empty list. In this case,
- * we hold the received skb in the skb list, waiting to
- * be transmitted once Tx buffers become available
- */
- if (list_empty(&pshm_drv->tx_empty_list))
- goto send_msg;
-
- /* Get the first free Tx buffer. */
- pbuf = list_entry(pshm_drv->tx_empty_list.next,
- struct buf_list, list);
- do {
- if (append) {
- skb = skb_peek(&pshm_drv->sk_qhead);
- if (skb == NULL)
- break;
- }
-
- frm = (struct shm_caif_frm *)
- (pbuf->desc_vptr + pbuf->frm_ofs);
-
- frm->hdr_ofs = 0;
- frmlen = 0;
- frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
-
- /* Add tail padding if needed. */
- if (frmlen % SHM_FRM_PAD_LEN)
- frmlen += SHM_FRM_PAD_LEN -
- (frmlen % SHM_FRM_PAD_LEN);
-
- /*
- * Verify that packet, header and additional padding
- * can fit within the buffer frame area.
- */
- if (frmlen >= (pbuf->len - pbuf->frm_ofs))
- break;
-
- if (!append) {
- list_del_init(&pbuf->list);
- append = 1;
- }
-
- skb = skb_dequeue(&pshm_drv->sk_qhead);
- if (skb == NULL)
- break;
- /* Copy in CAIF frame. */
- skb_copy_bits(skb, 0, pbuf->desc_vptr +
- pbuf->frm_ofs + SHM_HDR_LEN +
- frm->hdr_ofs, skb->len);
-
- pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
- pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
- frmlen;
- dev_kfree_skb_irq(skb);
-
- /* Fill in the shared memory packet descriptor area. */
- pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
- /* Forward to current frame. */
- pck_desc += pbuf->frames;
- pck_desc->frm_ofs = (pbuf->phy_addr -
- pshm_drv->shm_base_addr) +
- pbuf->frm_ofs;
- pck_desc->frm_len = frmlen;
- /* Terminate packet descriptor area. */
- pck_desc++;
- pck_desc->frm_ofs = 0;
- /* Update buffer parameters. */
- pbuf->frames++;
- pbuf->frm_ofs += frmlen + (frmlen % 32);
-
- } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
-
- /* Assign buffer as full. */
- list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
- append = 0;
- mbox_msg |= SHM_SET_FULL(pbuf->index);
-send_msg:
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- if (mbox_msg)
- pshm_drv->pshm_dev->pshmdev_mbxsend
- (pshm_drv->pshm_dev->shm_id, mbox_msg);
- } while (mbox_msg);
-}
-
-static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
-{
- struct shmdrv_layer *pshm_drv;
-
- pshm_drv = netdev_priv(shm_netdev);
-
- skb_queue_tail(&pshm_drv->sk_qhead, skb);
-
- /* Schedule Tx work queue. for deferred processing of skbs*/
- if (!work_pending(&pshm_drv->shm_tx_work))
- queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
-
- return 0;
-}
-
-static const struct net_device_ops netdev_ops = {
- .ndo_open = shm_netdev_open,
- .ndo_stop = shm_netdev_close,
- .ndo_start_xmit = shm_netdev_tx,
-};
-
-static void shm_netdev_setup(struct net_device *pshm_netdev)
-{
- struct shmdrv_layer *pshm_drv;
- pshm_netdev->netdev_ops = &netdev_ops;
-
- pshm_netdev->mtu = CAIF_MAX_MTU;
- pshm_netdev->type = ARPHRD_CAIF;
- pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
- pshm_netdev->tx_queue_len = 0;
- pshm_netdev->destructor = free_netdev;
-
- pshm_drv = netdev_priv(pshm_netdev);
-
- /* Initialize structures in a clean state. */
- memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
-
- pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
-}
-
-int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
-{
- int result, j;
- struct shmdrv_layer *pshm_drv = NULL;
-
- pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
- "cfshm%d", shm_netdev_setup);
- if (!pshm_dev->pshm_netdev)
- return -ENOMEM;
-
- pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
- pshm_drv->pshm_dev = pshm_dev;
-
- /*
- * Initialization starts with the verification of the
- * availability of MBX driver by calling its setup function.
- * MBX driver must be available by this time for proper
- * functioning of SHM driver.
- */
- if ((pshm_dev->pshmdev_mbxsetup
- (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
- pr_warn("Could not config. SHM Mailbox,"
- " Bailing out.....\n");
- free_netdev(pshm_dev->pshm_netdev);
- return -ENODEV;
- }
-
- skb_queue_head_init(&pshm_drv->sk_qhead);
-
- pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
- " INSTANCE AT pshm_drv =0x%p\n",
- pshm_drv->pshm_dev->shm_id, pshm_drv);
-
- if (pshm_dev->shm_total_sz <
- (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
-
- pr_warn("ERROR, Amount of available"
- " Phys. SHM cannot accommodate current SHM "
- "driver configuration, Bailing out ...\n");
- free_netdev(pshm_dev->pshm_netdev);
- return -ENOMEM;
- }
-
- pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
- pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
-
- if (pshm_dev->shm_loopback)
- pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
- else
- pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
- (NR_TX_BUF * TX_BUF_SZ);
-
- spin_lock_init(&pshm_drv->lock);
- INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
- INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
- INIT_LIST_HEAD(&pshm_drv->tx_full_list);
-
- INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
- INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
- INIT_LIST_HEAD(&pshm_drv->rx_full_list);
-
- INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
- INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
-
- pshm_drv->pshm_tx_workqueue =
- create_singlethread_workqueue("shm_tx_work");
- pshm_drv->pshm_rx_workqueue =
- create_singlethread_workqueue("shm_rx_work");
-
- for (j = 0; j < NR_TX_BUF; j++) {
- struct buf_list *tx_buf =
- kmalloc(sizeof(struct buf_list), GFP_KERNEL);
-
- if (tx_buf == NULL) {
- free_netdev(pshm_dev->pshm_netdev);
- return -ENOMEM;
- }
- tx_buf->index = j;
- tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
- tx_buf->len = TX_BUF_SZ;
- tx_buf->frames = 0;
- tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
-
- if (pshm_dev->shm_loopback)
- tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
- else
- /*
- * FIXME: the result of ioremap is not a pointer - arnd
- */
- tx_buf->desc_vptr =
- ioremap(tx_buf->phy_addr, TX_BUF_SZ);
-
- list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
- }
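The FIXME above notes that ioremap() returns an __iomem cookie, not a plain pointer, so storing it in an unsigned char * and dereferencing it directly is not sparse-clean. A hedged sketch of the annotated form (struct and field names hypothetical), accessing the mapping only through the io accessors:

	#include <linux/errno.h>
	#include <linux/io.h>

	struct buf_desc {
		unsigned long phy_addr;
		void __iomem *desc_viom;	/* __iomem cookie from ioremap() */
	};

	static int map_buf(struct buf_desc *b, size_t len)
	{
		b->desc_viom = ioremap(b->phy_addr, len);
		if (!b->desc_viom)
			return -ENOMEM;
		/* go through the io accessors, never a plain dereference */
		iowrite32(0, b->desc_viom);
		return 0;
	}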
-
- for (j = 0; j < NR_RX_BUF; j++) {
- struct buf_list *rx_buf =
- kmalloc(sizeof(struct buf_list), GFP_KERNEL);
-
- if (rx_buf == NULL) {
- free_netdev(pshm_dev->pshm_netdev);
- return -ENOMEM;
- }
- rx_buf->index = j;
- rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
- rx_buf->len = RX_BUF_SZ;
-
- if (pshm_dev->shm_loopback)
- rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
- else
- rx_buf->desc_vptr =
- ioremap(rx_buf->phy_addr, RX_BUF_SZ);
- list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
- }
-
- pshm_drv->tx_empty_available = 1;
- result = register_netdev(pshm_dev->pshm_netdev);
- if (result)
- pr_warn("ERROR[%d], SHM could not, "
- "register with NW FRMWK Bailing out ...\n", result);
-
- return result;
-}
-
-void caif_shmcore_remove(struct net_device *pshm_netdev)
-{
- struct buf_list *pbuf;
- struct shmdrv_layer *pshm_drv = NULL;
-
- pshm_drv = netdev_priv(pshm_netdev);
-
- while (!(list_empty(&pshm_drv->tx_pend_list))) {
- pbuf =
- list_entry(pshm_drv->tx_pend_list.next,
- struct buf_list, list);
-
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->tx_full_list))) {
- pbuf =
- list_entry(pshm_drv->tx_full_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->tx_empty_list))) {
- pbuf =
- list_entry(pshm_drv->tx_empty_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->rx_full_list))) {
-		pbuf =
-			list_entry(pshm_drv->rx_full_list.next,
-					struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->rx_pend_list))) {
-		pbuf =
-			list_entry(pshm_drv->rx_pend_list.next,
-					struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->rx_empty_list))) {
- pbuf =
- list_entry(pshm_drv->rx_empty_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- /* Destroy work queues. */
- destroy_workqueue(pshm_drv->pshm_tx_workqueue);
- destroy_workqueue(pshm_drv->pshm_rx_workqueue);
-
- unregister_netdev(pshm_netdev);
-}
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index a175d0be1ae..ee705771bd2 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -188,10 +188,9 @@ static int desc_list_init(struct net_device *dev)
/* allocate a new skb for next time receive */
new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
- if (!new_skb) {
- pr_notice("init: low on mem - packet dropped\n");
+ if (!new_skb)
goto init_error;
- }
+
skb_reserve(new_skb, NET_IP_ALIGN);
	/* Invalidate the data cache over the skb->data range when it is a
	 * write-back cache. This prevents overwriting the new data from DMA
@@ -1236,7 +1235,6 @@ static void bfin_mac_rx(struct net_device *dev)
new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
- netdev_notice(dev, "rx: low on mem - packet dropped\n");
dev->stats.rx_dropped++;
goto out;
}
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 0be2195e503..269295403fc 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,35 +1464,23 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* Allocate TX descriptor ring in coherent memory */
- greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
- 1024,
- &greth->tx_bd_base_phys,
- GFP_KERNEL);
-
+ greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL | __GFP_ZERO);
if (!greth->tx_bd_base) {
- if (netif_msg_probe(greth))
- dev_err(&dev->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM;
goto error3;
}
- memset(greth->tx_bd_base, 0, 1024);
-
/* Allocate RX descriptor ring in coherent memory */
- greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
- 1024,
- &greth->rx_bd_base_phys,
- GFP_KERNEL);
-
+ greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL | __GFP_ZERO);
if (!greth->rx_bd_base) {
- if (netif_msg_probe(greth))
- dev_err(greth->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM;
goto error4;
}
- memset(greth->rx_bd_base, 0, 1024);
-
/* Get MAC address from: module param, OF property or ID prom */
for (i = 0; i < 6; i++) {
if (macaddr[i] != 0)
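The same conversion recurs below (macmace, bcm63xx_enet, bnx2 and the bnx2x allocation macros): passing __GFP_ZERO makes dma_alloc_coherent() return zeroed memory, so both the explicit memset() and the cast of the void * return value go away. A minimal sketch of the resulting shape, with a hypothetical RING_BYTES size:

	#include <linux/dma-mapping.h>

	#define RING_BYTES 1024	/* hypothetical descriptor ring size */

	static void *ring_alloc(struct device *dev, dma_addr_t *handle)
	{
		/* one call allocates and zeroes; no separate memset() needed */
		return dma_alloc_coherent(dev, RING_BYTES, handle,
					  GFP_KERNEL | __GFP_ZERO);
	}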
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 6e722dc37db..65926a95657 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -318,8 +318,6 @@ static int lance_rx (struct net_device *dev)
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
if (!skb) {
- printk ("%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 3789affbc0e..0866e762743 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -293,7 +293,6 @@ static int lance_rx(struct net_device *dev)
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
if (!skb) {
- netdev_warn(dev, "Memory squeeze, deferring packet\n");
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 60e2b701afe..9793767996a 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -528,7 +528,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
dev->stats.rx_packets++;
} else {
am_writeword (dev, hdraddr + 2, RMD_OWN);
- printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
break;
}
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 98f4522fd17..c178eb4c816 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,6 @@ static int ariadne_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- netdev_warn(dev, "Memory squeeze, deferring packet\n");
for (i = 0; i < RX_RING_SIZE; i++)
if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
break;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 84219df72f5..e8d0ef508f4 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -996,8 +996,6 @@ static int lance_rx( struct net_device *dev )
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
- dev->name ));
for( i = 0; i < RX_RING_SIZE; i++ )
if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
RMD1_OWN_CHIP)
@@ -1149,9 +1147,7 @@ static struct net_device *atarilance_dev;
static int __init atarilance_module_init(void)
{
atarilance_dev = atarilance_probe(-1);
- if (IS_ERR(atarilance_dev))
- return PTR_ERR(atarilance_dev);
- return 0;
+ return PTR_RET(atarilance_dev);
}
static void __exit atarilance_module_exit(void)
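PTR_RET() (used again for mvme147, ni65 and sun3lance below) folds the IS_ERR()/PTR_ERR() dance into a single call: it returns PTR_ERR(ptr) when ptr is an error pointer and 0 otherwise. A sketch of the equivalence:

	#include <linux/err.h>
	#include <linux/netdevice.h>

	/* open-coded form */
	static int init_open_coded(struct net_device *dev)
	{
		if (IS_ERR(dev))
			return PTR_ERR(dev);
		return 0;
	}

	/* identical behaviour via the helper */
	static int init_helper(struct net_device *dev)
	{
		return PTR_RET(dev);
	}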
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index de774d41914..688aede742c 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -727,7 +727,6 @@ static int au1000_rx(struct net_device *dev)
frmlen -= 4; /* Remove FCS */
skb = netdev_alloc_skb(dev, frmlen + 2);
if (skb == NULL) {
- netdev_err(dev, "Memory squeeze, dropping packet.\n");
dev->stats.rx_dropped++;
continue;
}
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index baca0bd1b39..3d86ffeb4e1 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -607,8 +607,6 @@ static int lance_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == 0) {
- printk("%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, rmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 9af3c307862..a51497c9d2a 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -188,9 +188,7 @@ static struct net_device *dev_mvme147_lance;
int __init init_module(void)
{
dev_mvme147_lance = mvme147lance_probe(-1);
- if (IS_ERR(dev_mvme147_lance))
- return PTR_ERR(dev_mvme147_lance);
- return 0;
+ return PTR_RET(dev_mvme147_lance);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 013b6510853..26fc0ce0faa 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -1238,7 +1238,7 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
int __init init_module(void)
{
dev_ni65 = ni65_probe(-1);
- return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
+ return PTR_RET(dev_ni65);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 797f847edf1..ed213072764 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1166,7 +1166,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
if (skb == NULL) {
- netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 74b3891b648..4375abe61da 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -812,9 +812,6 @@ static int lance_rx( struct net_device *dev )
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
- dev->name ));
-
dev->stats.rx_dropped++;
head->msg_length = 0;
head->flag |= RMD1_OWN_CHIP;
@@ -943,9 +940,7 @@ static struct net_device *sun3lance_dev;
int __init init_module(void)
{
sun3lance_dev = sun3lance_probe(-1);
- if (IS_ERR(sun3lance_dev))
- return PTR_ERR(sun3lance_dev);
- return 0;
+ return PTR_RET(sun3lance_dev);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 6a40290d372..f47b780892e 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -536,8 +536,6 @@ static void lance_rx_dvma(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -708,8 +706,6 @@ static void lance_rx_pio(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
sbus_writew(0, &rd->mblength);
sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1377,10 +1373,9 @@ static int sparc_lance_probe_one(struct platform_device *op,
dma_alloc_coherent(&op->dev,
sizeof(struct lance_init_block),
&lp->init_block_dvma, GFP_ATOMIC);
- if (!lp->init_block_mem) {
- printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
+ if (!lp->init_block_mem)
goto fail;
- }
+
lp->pio_buffer = 0;
lp->init_ring = lance_init_ring_dvma;
lp->rx = lance_rx_dvma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index a206779c68c..4ce8ceb6220 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -386,20 +386,16 @@ static int mace_open(struct net_device *dev)
/* Allocate the DMA ring buffers */
mp->tx_ring = dma_alloc_coherent(mp->device,
- N_TX_RING * MACE_BUFF_SIZE,
- &mp->tx_ring_phys, GFP_KERNEL);
- if (mp->tx_ring == NULL) {
- printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
+ N_TX_RING * MACE_BUFF_SIZE,
+ &mp->tx_ring_phys, GFP_KERNEL);
+ if (mp->tx_ring == NULL)
goto out1;
- }
mp->rx_ring = dma_alloc_coherent(mp->device,
- N_RX_RING * MACE_BUFF_SIZE,
- &mp->rx_ring_phys, GFP_KERNEL);
- if (mp->rx_ring == NULL) {
- printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
+ N_RX_RING * MACE_BUFF_SIZE,
+ &mp->rx_ring_phys, GFP_KERNEL);
+ if (mp->rx_ring == NULL)
goto out2;
- }
mace_dma_off(dev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 92f4734f860..e1f1b2a0673 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1420,11 +1420,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
RRS_PKT_SIZE_MASK) - 4; /* CRC */
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
- if (skb == NULL) {
- netdev_warn(netdev,
- "Memory squeeze, deferring packet\n");
+ if (skb == NULL)
goto skip_pkt;
- }
+
memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
skb_put(skb, packet_size);
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1278b47022e..a046b6ff847 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -437,9 +437,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
/* alloc new buffer */
skb = netdev_alloc_skb_ip_align(netdev, rx_size);
if (NULL == skb) {
- printk(KERN_WARNING
- "%s: Mem squeeze, deferring packet.\n",
- netdev->name);
/*
* Check that some rx space is free. If not,
* free one and mark stats->rx_dropped++.
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 7d81e059e81..0b3e23ec37f 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -862,27 +862,25 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!p) {
- dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
goto out_freeirq_tx;
}
- memset(p, 0, size);
priv->rx_desc_alloc_size = size;
priv->rx_desc_cpu = p;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!p) {
- dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
goto out_free_rx_ring;
}
- memset(p, 0, size);
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
@@ -1619,7 +1617,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
const char *clk_name;
- unsigned int iomem_size;
int i, ret;
/* stop if shared driver failed, assume driver->probe will be
@@ -1644,17 +1641,12 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- iomem_size = resource_size(res_mem);
- if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
- ret = -EBUSY;
- goto out;
- }
-
- priv->base = ioremap(res_mem->start, iomem_size);
+ priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
if (priv->base == NULL) {
ret = -ENOMEM;
- goto out_release_mem;
+ goto out;
}
+
dev->irq = priv->irq = res_irq->start;
priv->irq_rx = res_irq_rx->start;
priv->irq_tx = res_irq_tx->start;
@@ -1674,9 +1666,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->mac_clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
- goto out_unmap;
+ goto out;
}
- clk_enable(priv->mac_clk);
+ clk_prepare_enable(priv->mac_clk);
/* initialize default and fetch platform data */
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1705,7 +1697,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->phy_clk = NULL;
goto out_put_clk_mac;
}
- clk_enable(priv->phy_clk);
+ clk_prepare_enable(priv->phy_clk);
}
/* do minimal hardware init to be able to probe mii bus */
@@ -1733,7 +1725,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
* if a slave is not present on hw */
bus->phy_mask = ~(1 << priv->phy_id);
- bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
if (!bus->irq) {
ret = -ENOMEM;
goto out_free_mdio;
@@ -1794,10 +1787,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
return 0;
out_unregister_mdio:
- if (priv->mii_bus) {
+ if (priv->mii_bus)
mdiobus_unregister(priv->mii_bus);
- kfree(priv->mii_bus->irq);
- }
out_free_mdio:
if (priv->mii_bus)
@@ -1807,19 +1798,13 @@ out_uninit_hw:
/* turn off mdc clock */
enet_writel(priv, 0, ENET_MIISC_REG);
if (priv->phy_clk) {
- clk_disable(priv->phy_clk);
+ clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}
out_put_clk_mac:
- clk_disable(priv->mac_clk);
+ clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);
-
-out_unmap:
- iounmap(priv->base);
-
-out_release_mem:
- release_mem_region(res_mem->start, iomem_size);
out:
free_netdev(dev);
return ret;
@@ -1833,7 +1818,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
- struct resource *res;
/* stop netdevice */
dev = platform_get_drvdata(pdev);
@@ -1845,7 +1829,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
if (priv->has_phy) {
mdiobus_unregister(priv->mii_bus);
- kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
} else {
struct bcm63xx_enet_platform_data *pd;
@@ -1856,17 +1839,12 @@ static int bcm_enet_remove(struct platform_device *pdev)
bcm_enet_mdio_write_mii);
}
- /* release device resources */
- iounmap(priv->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
/* disable hw block clocks */
if (priv->phy_clk) {
- clk_disable(priv->phy_clk);
+ clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}
- clk_disable(priv->mac_clk);
+ clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);
platform_set_drvdata(pdev, NULL);
@@ -1889,31 +1867,20 @@ struct platform_driver bcm63xx_enet_driver = {
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
struct resource *res;
- unsigned int iomem_size;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- iomem_size = resource_size(res);
- if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
- return -EBUSY;
-
- bcm_enet_shared_base = ioremap(res->start, iomem_size);
- if (!bcm_enet_shared_base) {
- release_mem_region(res->start, iomem_size);
+ bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!bcm_enet_shared_base)
return -ENOMEM;
- }
+
return 0;
}
static int bcm_enet_shared_remove(struct platform_device *pdev)
{
- struct resource *res;
-
- iounmap(bcm_enet_shared_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
return 0;
}
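Two conversions above carry the cleanup burden for this driver: devm_request_and_ioremap() ties the MMIO mapping's lifetime to the device, so the manual request_mem_region()/ioremap() and iounmap()/release_mem_region() pairs disappear from both probe and remove, and clk_prepare_enable()/clk_disable_unprepare() are the calls required once the common clock framework is in use. A condensed sketch of the resulting probe shape (function and clock names hypothetical):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int sketch_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;
		struct clk *clk;
		int ret;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_request_and_ioremap(&pdev->dev, res); /* freed on detach */
		if (!base)
			return -ENOMEM;

		clk = clk_get(&pdev->dev, "enet");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);	/* prepare + enable in one call */
		if (ret) {
			clk_put(clk);
			return ret;
		}
		return 0;
	}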
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index da5f4397f87..eec0af45b85 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>
@@ -244,10 +245,8 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
/* Alloc skb */
slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
- if (!slot->skb) {
- bgmac_err(bgmac, "Allocation of skb failed!\n");
+ if (!slot->skb)
return -ENOMEM;
- }
/* Poison - if everything goes fine, hardware will overwrite it */
rx = (struct bgmac_rx_header *)slot->skb->data;
@@ -1313,6 +1312,73 @@ static const struct ethtool_ops bgmac_ethtool_ops = {
};
/**************************************************
+ * MII
+ **************************************************/
+
+static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ return bgmac_phy_read(bus->priv, mii_id, regnum);
+}
+
+static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ return bgmac_phy_write(bus->priv, mii_id, regnum, value);
+}
+
+static int bgmac_mii_register(struct bgmac *bgmac)
+{
+ struct mii_bus *mii_bus;
+ int i, err = 0;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "bgmac mii bus";
+ sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
+ bgmac->core->core_unit);
+ mii_bus->priv = bgmac;
+ mii_bus->read = bgmac_mii_read;
+ mii_bus->write = bgmac_mii_write;
+ mii_bus->parent = &bgmac->core->dev;
+ mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
+
+ mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+ if (!mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_free_bus;
+ }
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ bgmac_err(bgmac, "Registration of mii bus failed\n");
+ goto err_free_irq;
+ }
+
+ bgmac->mii_bus = mii_bus;
+
+ return err;
+
+err_free_irq:
+ kfree(mii_bus->irq);
+err_free_bus:
+ mdiobus_free(mii_bus);
+ return err;
+}
+
+static void bgmac_mii_unregister(struct bgmac *bgmac)
+{
+ struct mii_bus *mii_bus = bgmac->mii_bus;
+
+ mdiobus_unregister(mii_bus);
+ kfree(mii_bus->irq);
+ mdiobus_free(mii_bus);
+}
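With the bus registered, a PHY sitting on it can be attached the usual way. A hedged sketch of what a consumer would do, assuming the 3.9+ four-argument phy_connect() and the bgmac fields shown in this patch (callback name and interface mode are hypothetical):

	#include <linux/phy.h>
	#include "bgmac.h"	/* struct bgmac: mii_bus, phyaddr, net_dev */

	static void sketch_adjust_link(struct net_device *net_dev)
	{
		/* react to phydev->link / speed / duplex changes here */
	}

	static int sketch_connect_phy(struct bgmac *bgmac)
	{
		char bus_id[MII_BUS_ID_SIZE + 3];
		struct phy_device *phy_dev;

		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT,
			 bgmac->mii_bus->id, bgmac->phyaddr);
		phy_dev = phy_connect(bgmac->net_dev, bus_id, sketch_adjust_link,
				      PHY_INTERFACE_MODE_MII);
		return IS_ERR(phy_dev) ? PTR_ERR(phy_dev) : 0;
	}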
+
+/**************************************************
* BCMA bus ops
**************************************************/
@@ -1404,11 +1470,18 @@ static int bgmac_probe(struct bcma_device *core)
if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+ err = bgmac_mii_register(bgmac);
+ if (err) {
+ bgmac_err(bgmac, "Cannot register MDIO\n");
+ err = -ENOTSUPP;
+ goto err_dma_free;
+ }
+
err = register_netdev(bgmac->net_dev);
if (err) {
bgmac_err(bgmac, "Cannot register net device\n");
err = -ENOTSUPP;
- goto err_dma_free;
+ goto err_mii_unregister;
}
netif_carrier_off(net_dev);
@@ -1417,6 +1490,8 @@ static int bgmac_probe(struct bcma_device *core)
return 0;
+err_mii_unregister:
+ bgmac_mii_unregister(bgmac);
err_dma_free:
bgmac_dma_free(bgmac);
@@ -1433,6 +1508,7 @@ static void bgmac_remove(struct bcma_device *core)
netif_napi_del(&bgmac->napi);
unregister_netdev(bgmac->net_dev);
+ bgmac_mii_unregister(bgmac);
bgmac_dma_free(bgmac);
bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4ede614c81f..98d4b5fcc07 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -399,6 +399,7 @@ struct bgmac {
struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
struct net_device *net_dev;
struct napi_struct napi;
+ struct mii_bus *mii_bus;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2f0ba8f2fd6..e709296e3b8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -854,12 +854,11 @@ bnx2_alloc_mem(struct bnx2 *bp)
sizeof(struct statistics_block);
status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping, GFP_KERNEL);
+ &bp->status_blk_mapping,
+ GFP_KERNEL | __GFP_ZERO);
if (status_blk == NULL)
goto alloc_mem_err;
- memset(status_blk, 0, bp->status_stats_size);
-
bnapi = &bp->bnx2_napi[0];
bnapi->status_blk.msi = status_blk;
bnapi->hw_tx_cons_ptr =
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e4605a96508..a4729c73ab8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -492,7 +492,6 @@ enum bnx2x_tpa_mode_t {
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
-#define BNX2X_NAPI_WEIGHT 128
struct napi_struct napi;
union host_hc_status_block status_blk;
	/* chip-independent shortcuts into sb structure */
@@ -613,9 +612,10 @@ struct bnx2x_fastpath {
 * START_BD - describes the packet
 * START_BD(split) - includes unpaged data segment for GSO
 * PARSING_BD - for TSO and CSUM data
+ * PARSING_BD2 - for encapsulation data
 * Frag BDs - describe pages for frags
*/
-#define BDS_PER_TX_PKT 3
+#define BDS_PER_TX_PKT 4
#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
/* max BDs per tx packet including next pages */
#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
@@ -730,18 +730,24 @@ struct bnx2x_fastpath {
#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
skb->csum_offset))
-#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
-#define XMIT_PLAIN 0
-#define XMIT_CSUM_V4 0x1
-#define XMIT_CSUM_V6 0x2
-#define XMIT_CSUM_TCP 0x4
-#define XMIT_GSO_V4 0x8
-#define XMIT_GSO_V6 0x10
+#define XMIT_PLAIN 0
+#define XMIT_CSUM_V4 (1 << 0)
+#define XMIT_CSUM_V6 (1 << 1)
+#define XMIT_CSUM_TCP (1 << 2)
+#define XMIT_GSO_V4 (1 << 3)
+#define XMIT_GSO_V6 (1 << 4)
+#define XMIT_CSUM_ENC_V4 (1 << 5)
+#define XMIT_CSUM_ENC_V6 (1 << 6)
+#define XMIT_GSO_ENC_V4 (1 << 7)
+#define XMIT_GSO_ENC_V6 (1 << 8)
-#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
-#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
+#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6)
+#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6)
+#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC)
+#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC)
/* stuff added to make the code fit 80Col */
#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
@@ -1215,6 +1221,7 @@ enum {
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_HYPERVISOR_VLAN,
};
@@ -1281,6 +1288,8 @@ struct bnx2x {
dma_addr_t pf2vf_bulletin_mapping;
struct pf_vf_bulletin_content old_bulletin;
+
+ u16 requested_nr_virtfn;
#endif /* CONFIG_BNX2X_SRIOV */
struct net_device *dev;
@@ -1944,12 +1953,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x) \
- memset(x, 0, size); \
- } while (0)
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+ GFP_KERNEL | __GFP_ZERO)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
@@ -2286,7 +2292,7 @@ static const u32 dmae_reg_go_c[] = {
DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
#define BNX2X_MF_SD_PROTOCOL(bp) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4046f97378c..db6912b0999 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -451,7 +451,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
* Compute number of aggregated segments, and gso_type.
*/
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
- u16 len_on_bd, unsigned int pkt_len)
+ u16 len_on_bd, unsigned int pkt_len,
+ u16 num_of_coalesced_segs)
{
/* TPA aggregation won't have either IP options or TCP options
* other than timestamp or IPv6 extension headers.
@@ -480,8 +481,7 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
* to skb_shinfo(skb)->gso_segs
*/
- NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
- skb_shinfo(skb)->gso_size);
+ NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -537,7 +537,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* This is needed in order to enable forwarding support */
if (frag_size)
bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
- le16_to_cpu(cqe->pkt_len));
+ le16_to_cpu(cqe->pkt_len),
+ le16_to_cpu(cqe->num_of_coalesced_segs));
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
@@ -3086,11 +3087,11 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
*/
-static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata,
- struct sw_tx_bd *tx_buf,
- struct eth_tx_start_bd **tx_bd, u16 hlen,
- u16 bd_prod, int nbd)
+static u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+ struct sw_tx_bd *tx_buf,
+ struct eth_tx_start_bd **tx_bd, u16 hlen,
+ u16 bd_prod)
{
struct eth_tx_start_bd *h_tx_bd = *tx_bd;
struct eth_tx_bd *d_tx_bd;
@@ -3098,11 +3099,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
int old_len = le16_to_cpu(h_tx_bd->nbytes);
/* first fix first BD */
- h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen);
- DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
- h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
+ h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
/* now get a new data BD
* (after the pbd) and fill it */
@@ -3131,7 +3131,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
-static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
__sum16 tsum = (__force __sum16) csum;
@@ -3146,30 +3146,47 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
return bswab16(tsum);
}
-static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
u32 rc;
+ __u8 prot = 0;
+ __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- rc = XMIT_PLAIN;
+ return XMIT_PLAIN;
- else {
- if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
- rc = XMIT_CSUM_V6;
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- rc |= XMIT_CSUM_TCP;
+ protocol = vlan_get_protocol(skb);
+ if (protocol == htons(ETH_P_IPV6)) {
+ rc = XMIT_CSUM_V6;
+ prot = ipv6_hdr(skb)->nexthdr;
+ } else {
+ rc = XMIT_CSUM_V4;
+ prot = ip_hdr(skb)->protocol;
+ }
+ if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ rc |= XMIT_CSUM_ENC_V6;
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
} else {
- rc = XMIT_CSUM_V4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ rc |= XMIT_CSUM_ENC_V4;
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
}
}
+ if (prot == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
- if (skb_is_gso_v6(skb))
- rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
- else if (skb_is_gso(skb))
- rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else if (skb_is_gso(skb)) {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
return rc;
}
@@ -3254,14 +3271,23 @@ exit_lbl:
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
+ struct ipv6hdr *ipv6;
+
*parsing_data |= (skb_shinfo(skb)->gso_size <<
ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
ETH_TX_PARSE_BD_E2_LSO_MSS;
- if ((xmit_type & XMIT_GSO_V6) &&
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+
+ if (xmit_type & XMIT_GSO_ENC_V6)
+ ipv6 = inner_ipv6_hdr(skb);
+ else if (xmit_type & XMIT_GSO_V6)
+ ipv6 = ipv6_hdr(skb);
+ else
+ ipv6 = NULL;
+
+ if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
@@ -3272,13 +3298,13 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
* @pbd: parse BD
* @xmit_type: xmit flags
*/
-static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
- pbd->tcp_flags = pbd_tcp_flags(skb);
+ pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
if (xmit_type & XMIT_GSO_V4) {
pbd->ip_id = bswab16(ip_hdr(skb)->id);
@@ -3298,6 +3324,40 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
}
/**
+ * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
+ *
+ * 57712/578xx related, when skb has encapsulation
+ */
+static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
+{
+ *parsing_data |=
+ ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+ return skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data;
+ }
+
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_inner_transport_header(skb) +
+ sizeof(struct udphdr) - skb->data;
+}
+
+/**
* bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
*
* @bp: driver handle
@@ -3305,15 +3365,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @parsing_data: data to be updated
* @xmit_type: xmit flags
*
- * 57712 related
+ * 57712/578xx related
*/
-static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- u32 *parsing_data, u32 xmit_type)
+static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) {
*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3328,17 +3388,15 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
-static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+/* set FW indication according to inner or outer protocols if tunneled */
+static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_start_bd *tx_start_bd,
+ u32 xmit_type)
{
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
- if (xmit_type & XMIT_CSUM_V4)
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IP_CSUM;
- else
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IPV6;
+ if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
if (!(xmit_type & XMIT_CSUM_TCP))
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
@@ -3352,9 +3410,9 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
* @pbd: parse BD to be updated
* @xmit_type: xmit flags
*/
-static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
@@ -3400,6 +3458,72 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
return hlen;
}
+static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e2 *pbd_e2,
+ struct eth_tx_parse_2nd_bd *pbd2,
+ u16 *global_data,
+ u32 xmit_type)
+{
+ u16 inner_hlen_w = 0;
+ u8 outerip_off, outerip_len = 0;
+
+ /* IP len */
+ inner_hlen_w = (skb_inner_transport_header(skb) -
+ skb_inner_network_header(skb)) >> 1;
+
+ /* transport len */
+ if (xmit_type & XMIT_CSUM_TCP)
+ inner_hlen_w += inner_tcp_hdrlen(skb) >> 1;
+ else
+ inner_hlen_w += sizeof(struct udphdr) >> 1;
+
+ pbd2->fw_ip_hdr_to_payload_w = inner_hlen_w;
+
+ if (xmit_type & XMIT_CSUM_ENC_V4) {
+ struct iphdr *iph = inner_ip_hdr(skb);
+
+ pbd2->fw_ip_csum_wo_len_flags_frag =
+ bswab16(csum_fold((~iph->check) -
+ iph->tot_len - iph->frag_off));
+ } else {
+ pbd2->fw_ip_hdr_to_payload_w =
+ inner_hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+ }
+
+ pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
+
+ pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
+
+ if (xmit_type & XMIT_GSO_V4) {
+ pbd2->hw_ip_id = bswab16(ip_hdr(skb)->id);
+
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_tcpudp_magic(
+ inner_ip_hdr(skb)->saddr,
+ inner_ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ outerip_len = ip_hdr(skb)->ihl << 1;
+ } else {
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_ipv6_magic(
+ &inner_ipv6_hdr(skb)->saddr,
+ &inner_ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ }
+
+ outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+
+ *global_data |=
+ outerip_off |
+ (!!(xmit_type & XMIT_CSUM_V6) <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
+ (outerip_len <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
+}
+
/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
@@ -3415,6 +3539,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ struct eth_tx_parse_2nd_bd *pbd2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, txq_index;
@@ -3482,7 +3607,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mac_type = MULTICAST_ADDRESS;
}
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K
(there will be no violation to FW restrictions) */
@@ -3530,12 +3655,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- SET_FLAG(tx_start_bd->general_data,
- ETH_TX_START_BD_PARSE_NBDS,
- 0);
- /* header nbd */
- SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+ /* header nbd: indirectly zero other flags! */
+ tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
/* remember the first BD of the packet */
tx_buf->first_bd = txdata->tx_bd_prod;
@@ -3555,19 +3677,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
-#ifndef BNX2X_STOP_ON_ERROR
- if (IS_VF(bp)) {
-#endif
+ if (IS_VF(bp))
tx_start_bd->vlan_or_ethertype =
cpu_to_le16(ntohs(eth->h_proto));
-#ifndef BNX2X_STOP_ON_ERROR
- } else {
+ else
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
- }
-#endif
}
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@ -3577,23 +3696,58 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!CHIP_IS_E1x(bp)) {
pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
- /* Set PBD in checksum offload case */
- if (xmit_type & XMIT_CSUM)
+
+ if (xmit_type & XMIT_CSUM_ENC) {
+ u16 global_data = 0;
+
+ /* Set PBD in enc checksum offload case */
+ hlen = bnx2x_set_pbd_csum_enc(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
+
+ /* turn on 2nd parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
+
+ memset(pbd2, 0, sizeof(*pbd2));
+
+ pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
+ (skb_inner_network_header(skb) -
+ skb->data) >> 1;
+
+ if (xmit_type & XMIT_GSO_ENC)
+ bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
+ &global_data,
+ xmit_type);
+
+ pbd2->global_data = cpu_to_le16(global_data);
+
+			/* add an additional parsing BD indication to the start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS, 1);
+ /* set encapsulation flag in start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_TUNNEL_EXIST, 1);
+ nbd++;
+ } else if (xmit_type & XMIT_CSUM) {
+ /* Set PBD in checksum offload case w/o encapsulation */
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data,
xmit_type);
+ }
- if (IS_MF_SI(bp) || IS_VF(bp)) {
- /* fill in the MAC addresses in the PBD - for local
- * switching
- */
- bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
- &pbd_e2->src_mac_addr_mid,
- &pbd_e2->src_mac_addr_lo,
+		/* Add the MACs to the parsing BD if this is a VF */
+ if (IS_VF(bp)) {
+ /* override GRE parameters in BD */
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+ &pbd_e2->data.mac_addr.src_mid,
+ &pbd_e2->data.mac_addr.src_lo,
eth->h_source);
- bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
- &pbd_e2->dst_mac_addr_mid,
- &pbd_e2->dst_mac_addr_lo,
+
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
}
@@ -3615,14 +3769,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Setup the data pointer of the first BD of the packet */
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes;
DP(NETIF_MSG_TX_QUEUED,
- "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
+ "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
- le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+ le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield,
le16_to_cpu(tx_start_bd->vlan_or_ethertype));
@@ -3635,10 +3788,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
- if (unlikely(skb_headlen(skb) > hlen))
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ nbd++;
bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
&tx_start_bd, hlen,
- bd_prod, ++nbd);
+ bd_prod);
+ }
if (!CHIP_IS_E1x(bp))
bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
xmit_type);
@@ -3728,9 +3883,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pbd_e2)
DP(NETIF_MSG_TX_QUEUED,
"PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
- pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
- pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
- pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+ pbd_e2,
+ pbd_e2->data.mac_addr.dst_hi,
+ pbd_e2->data.mac_addr.dst_mid,
+ pbd_e2->data.mac_addr.dst_lo,
+ pbd_e2->data.mac_addr.src_hi,
+ pbd_e2->data.mac_addr.src_mid,
+ pbd_e2->data.mac_addr.src_lo,
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index aee7671ff4c..f9098d8fc25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -50,13 +50,13 @@ extern int int_mode;
} \
} while (0)
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset((void *)x, 0, size); \
- } while (0)
+#define BNX2X_PCI_ALLOC(x, y, size) \
+do { \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+ GFP_KERNEL | __GFP_ZERO); \
+ if (x == NULL) \
+ goto alloc_mem_err; \
+} while (0)
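The reworked macro still jumps to a local alloc_mem_err label on failure, so call sites keep their usual shape. A minimal sketch of such a call site (the ring name and size are hypothetical; struct bnx2x and the macro come from the driver headers):

	#include "bnx2x_cmn.h"	/* struct bnx2x, BNX2X_PCI_ALLOC */

	static int sketch_alloc_ring(struct bnx2x *bp)
	{
		void *ring;
		dma_addr_t ring_mapping;

		/* expands to a zeroing dma_alloc_coherent() plus a NULL check */
		BNX2X_PCI_ALLOC(ring, &ring_mapping, 4096);
		return 0;

	alloc_mem_err:
		return -ENOMEM;
	}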
#define BNX2X_ALLOC(x, size) \
do { \
@@ -496,7 +496,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int bnx2x_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
@@ -834,7 +837,7 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ bnx2x_poll, NAPI_POLL_WEIGHT);
}
static inline void bnx2x_add_all_napi(struct bnx2x *bp)
@@ -844,7 +847,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ bnx2x_poll, NAPI_POLL_WEIGHT);
}
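NAPI_POLL_WEIGHT is the core's canonical 64-packet budget; drivers are expected to pass it instead of private weight defines such as the removed BNX2X_NAPI_WEIGHT. A one-function sketch of the registration (poll callback hypothetical):

	#include <linux/netdevice.h>

	static int sketch_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;

		/* process up to budget packets here, counting them in done */
		if (done < budget)
			napi_complete(napi);
		return done;
	}

	static void sketch_add_napi(struct net_device *dev, struct napi_struct *napi)
	{
		netif_napi_add(dev, napi, sketch_poll, NAPI_POLL_WEIGHT);
	}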
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
@@ -970,6 +973,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
+ start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+
return bnx2x_func_state_change(bp, &func_params);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index edfa67adf2f..324d6913af6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3232,7 +3232,32 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev)
+static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
+ .get_settings = bnx2x_get_settings,
+ .set_settings = bnx2x_set_settings,
+ .get_drvinfo = bnx2x_get_drvinfo,
+ .get_msglevel = bnx2x_get_msglevel,
+ .set_msglevel = bnx2x_set_msglevel,
+ .get_link = bnx2x_get_link,
+ .get_coalesce = bnx2x_get_coalesce,
+ .get_ringparam = bnx2x_get_ringparam,
+ .set_ringparam = bnx2x_set_ringparam,
+ .get_sset_count = bnx2x_get_sset_count,
+ .get_strings = bnx2x_get_strings,
+ .get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
+ .get_rxfh_indir = bnx2x_get_rxfh_indir,
+ .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+};
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ if (IS_PF(bp))
+ SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ else /* vf */
+ SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index e5f808377c9..40f22c6794c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -30,31 +30,31 @@
* IRO[138].m2) + ((sbId) * IRO[138].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[316].base + ((pfId) * IRO[316].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[317].base + ((pfId) * IRO[317].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[318].base + ((pfId) * IRO[318].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+ (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+ (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+ (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[315].base + ((pfId) * IRO[315].m1))
+ (IRO[316].base + ((pfId) * IRO[316].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[307].base + ((pfId) * IRO[307].m1))
+ (IRO[308].base + ((pfId) * IRO[308].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[306].base + ((pfId) * IRO[306].m1))
+ (IRO[307].base + ((pfId) * IRO[307].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
+ (IRO[306].base + ((pfId) * IRO[306].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[151].base + ((funcId) * IRO[151].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -114,7 +114,7 @@
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
@@ -136,35 +136,32 @@
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[176].base + ((assertListEntry) * IRO[176].m1))
-#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
- (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
- IRO[205].m2))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[318].base + ((pfId) * IRO[318].m1))
+ (IRO[319].base + ((pfId) * IRO[319].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[282].base + ((pfId) * IRO[282].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[280].base + ((pfId) * IRO[280].m1))
+ (IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[279].base + ((pfId) * IRO[279].m1))
+ (IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
+ (IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[281].base + ((pfId) * IRO[281].m1))
+ (IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[182].base + ((pfId) * IRO[182].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -190,39 +187,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[293].base + ((pfId) * IRO[293].m1))
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
+ (IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[290].base + ((pfId) * IRO[290].m1))
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
+ (IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 037860ecc34..12f00a40cdf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -114,6 +114,10 @@ struct license_key {
#define EPIO_CFG_EPIO30 0x0000001f
#define EPIO_CFG_EPIO31 0x00000020
+struct mac_addr {
+ u32 upper;
+ u32 lower;
+};
struct shared_hw_cfg { /* NVRAM Offset */
/* Up to 16 bytes of NULL-terminated string */
@@ -508,7 +512,22 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
- u32 reserved0[6]; /* 0x178 */
+ /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2
+ * LOM recommended and tested value is 0xBEB2. Using a different
+ * value means using a value not tested by BRCM
+ */
+ u32 sfi_tap_values; /* 0x178 */
+ #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF
+ #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0
+
+ /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+ * value is 0x2. LOM recommended and tested value is 0x2. Using a
+ * different value means using a value not tested by BRCM
+ */
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+
+ u32 reserved0[5]; /* 0x17c */
u32 aeu_int_mask; /* 0x190 */
@@ -2821,8 +2840,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 2
-#define BCM_5710_FW_ENGINEERING_VERSION 0
+#define BCM_5710_FW_REVISION_VERSION 17
+#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3513,11 +3532,14 @@ struct client_init_tx_data {
#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
-#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
-#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
u8 default_vlan_flg;
u8 force_default_pri_flg;
- __le32 reserved3;
+ u8 tunnel_lso_inc_ip_id;
+ u8 refuse_outband_vlan_flg;
+ u8 tunnel_non_lso_pcsum_location;
+ u8 reserved1;
};
/*
@@ -3551,6 +3573,11 @@ struct client_update_ramrod_data {
__le16 silent_vlan_mask;
u8 silent_vlan_removal_flg;
u8 silent_vlan_change_flg;
+ u8 refuse_outband_vlan_flg;
+ u8 refuse_outband_vlan_change_flg;
+ u8 tx_switching_flg;
+ u8 tx_switching_change_flg;
+ __le32 reserved1;
__le32 echo;
};
@@ -3620,7 +3647,8 @@ struct eth_classify_header {
*/
struct eth_classify_mac_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3633,7 +3661,8 @@ struct eth_classify_mac_cmd {
*/
struct eth_classify_pair_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3855,8 +3884,68 @@ struct eth_halt_ramrod_data {
/*
- * Command for setting multicast classification for a client
+ * destination and source mac address.
+ */
+struct eth_mac_addresses {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_lo;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 src_lo;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_hi;
+ __le16 src_mid;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 src_mid;
+ __le16 src_hi;
+#endif
+};
+
+/* tunneling related data */
+struct eth_tunnel_data {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 reserved0;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved1;
+ u8 ip_hdr_start_inner_w;
+ __le16 pseudo_csum;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 pseudo_csum;
+ u8 ip_hdr_start_inner_w;
+ u8 reserved1;
+#endif
+};
+
+/* union for mac addresses and for tunneling data.
+ * considered as tunneling data only if (tunnel_exist == 1).
*/
+union eth_mac_addr_or_tunnel_data {
+ struct eth_mac_addresses mac_addr;
+ struct eth_tunnel_data tunnel_data;
+};
+
+/*Command for setting multicast classification for a client */
struct eth_multicast_rules_cmd {
u8 cmd_general_data;
#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
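The new union eth_mac_addr_or_tunnel_data above replaces the six discrete MAC halfwords of the E2 parsing BD; firmware treats the bytes as tunnel data only when the start BD carries TUNNEL_EXIST (set further down in this patch). A minimal sketch of the selection on the transmit path; the helper name and the use of the general_data field are illustrative, not part of this patch:

/* Illustrative sketch only: pick the union member and flag the choice
 * in the start BD so firmware knows how to interpret the bytes.
 */
static void fill_parse_bd_e2_addrs(struct eth_tx_start_bd *start_bd,
				   struct eth_tx_parse_bd_e2 *pbd,
				   bool tunnel)
{
	if (tunnel) {
		/* inner L4 pseudo checksum, per eth_tunnel_data */
		pbd->data.tunnel_data.pseudo_csum = cpu_to_le16(0);
		start_bd->general_data |=
			1 << ETH_TX_START_BD_TUNNEL_EXIST_SHIFT;
	} else {
		/* plain dst/src MAC halfwords, per eth_mac_addresses */
		pbd->data.mac_addr.dst_lo = cpu_to_le16(0);
	}
}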
@@ -3874,7 +3963,6 @@ struct eth_multicast_rules_cmd {
struct regpair reserved3;
};
-
/*
* parameters for multicast classification ramrod
*/
@@ -3883,7 +3971,6 @@ struct eth_multicast_rules_ramrod_data {
struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
};
-
/*
* Place holder for ramrods protocol specific data
*/
@@ -3947,11 +4034,14 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
u8 rss_result_mask;
u8 rss_mode;
- __le32 __reserved2;
+ __le16 udp_4tuple_dst_port_mask;
+ __le16 udp_4tuple_dst_port_value;
u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
__le32 rss_key[T_ETH_RSS_KEY];
__le32 echo;
@@ -4115,6 +4205,23 @@ enum eth_tpa_update_command {
MAX_ETH_TPA_UPDATE_COMMAND
};
+/* In case of LSO over an IPv4 tunnel, whether to increment
+ * the IP ID on the external or the internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id {
+ EXT_HEADER,
+ INT_HEADER,
+ MAX_ETH_TUNNEL_LSO_INC_IP_ID
+};
+
+/* In case a tunnel exists and L4 checksum offload is used,
+ * the pseudo checksum location: on the packet or on the BD.
+ */
+enum eth_tunnel_non_lso_pcsum_location {
+ PCSUM_ON_PKT,
+ PCSUM_ON_BD,
+ MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
+};
/*
* Tx regular BD structure
@@ -4166,8 +4273,8 @@ struct eth_tx_start_bd {
#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
-#define ETH_TX_START_BD_RESREVED (0x1<<7)
-#define ETH_TX_START_BD_RESREVED_SHIFT 7
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7)
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
};
/*
@@ -4216,15 +4323,10 @@ struct eth_tx_parse_bd_e1x {
* Tx parsing BD structure for ETH E2
*/
struct eth_tx_parse_bd_e2 {
- __le16 dst_mac_addr_lo;
- __le16 dst_mac_addr_mid;
- __le16 dst_mac_addr_hi;
- __le16 src_mac_addr_lo;
- __le16 src_mac_addr_mid;
- __le16 src_mac_addr_hi;
+ union eth_mac_addr_or_tunnel_data data;
__le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0)
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
@@ -4236,8 +4338,51 @@ struct eth_tx_parse_bd_e2 {
};
/*
- * The last BD in the BD memory will hold a pointer to the next BD memory
+ * Tx 2nd parsing BD structure for ETH packet
*/
+struct eth_tx_parse_2nd_bd {
+ __le16 global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
+ __le16 reserved1;
+ u8 tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+ u8 reserved2;
+ u8 tunnel_udp_hdr_start_w;
+ u8 fw_ip_hdr_to_payload_w;
+ __le16 fw_ip_csum_wo_len_flags_frag;
+ __le16 hw_ip_id;
+ __le32 tcp_send_seq;
+};
+
+/* The last BD in the BD memory will hold a pointer to the next BD memory */
struct eth_tx_next_bd {
__le32 addr_lo;
__le32 addr_hi;
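The tcp_flags byte of the new 2nd parsing BD mirrors the TCP header flag bits one for one, so filling it is a direct translation. A hedged sketch (helper name illustrative; the tcphdr bitfields come from <linux/tcp.h>):

static void fill_2nd_bd_tcp_flags(struct eth_tx_parse_2nd_bd *bd2,
				  const struct tcphdr *th)
{
	u8 flags = 0;

	if (th->fin)
		flags |= 1 << ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT;
	if (th->syn)
		flags |= 1 << ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT;
	if (th->rst)
		flags |= 1 << ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT;
	if (th->psh)
		flags |= 1 << ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT;
	if (th->ack)
		flags |= 1 << ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT;
	if (th->urg)
		flags |= 1 << ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT;
	if (th->ece)
		flags |= 1 << ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT;
	if (th->cwr)
		flags |= 1 << ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT;

	bd2->tcp_flags = flags;
}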
@@ -4252,6 +4397,7 @@ union eth_tx_bd_types {
struct eth_tx_bd reg_bd;
struct eth_tx_parse_bd_e1x parse_bd_e1x;
struct eth_tx_parse_bd_e2 parse_bd_e2;
+ struct eth_tx_parse_2nd_bd parse_2nd_bd;
struct eth_tx_next_bd next_bd;
};
@@ -4663,10 +4809,10 @@ enum common_spqe_cmd_id {
RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
RAMROD_CMD_ID_COMMON_START_TRAFFIC,
RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
+ RAMROD_CMD_ID_COMMON_SET_TIMESYNC,
MAX_COMMON_SPQE_CMD_ID
};
-
/*
* Per-protocol connection types
*/
@@ -4863,7 +5009,7 @@ struct vf_flr_event_data {
*/
struct malicious_vf_event_data {
u8 vf_id;
- u8 reserved0;
+ u8 err_id;
u16 reserved1;
u32 reserved2;
u32 reserved3;
@@ -4969,10 +5115,10 @@ enum event_ring_opcode {
EVENT_RING_OPCODE_CLASSIFICATION_RULES,
EVENT_RING_OPCODE_FILTERS_RULES,
EVENT_RING_OPCODE_MULTICAST_RULES,
+ EVENT_RING_OPCODE_SET_TIMESYNC,
MAX_EVENT_RING_OPCODE
};
-
/*
* Modes for fairness algorithm
*/
@@ -5010,14 +5156,18 @@ struct flow_control_configuration {
*/
struct function_start_data {
u8 function_mode;
- u8 reserved;
+ u8 allow_npar_tx_switching;
__le16 sd_vlan_tag;
__le16 vif_id;
u8 path_id;
u8 network_cos_mode;
+ u8 dmae_cmd_id;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ __le16 reserved1[2];
};
-
struct function_update_data {
u8 vif_id_change_flg;
u8 afex_default_vlan_change_flg;
@@ -5027,14 +5177,19 @@ struct function_update_data {
__le16 afex_default_vlan;
u8 allowed_priorities;
u8 network_cos_mode;
+ u8 lb_mode_en_change_flg;
u8 lb_mode_en;
u8 tx_switch_suspend_change_flg;
u8 tx_switch_suspend;
u8 echo;
- __le16 reserved1;
+ u8 reserved1;
+ u8 update_gre_cfg_flg;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ u32 reserved3;
};
-
/*
* FW version stored in the Xstorm RAM
*/
@@ -5061,6 +5216,22 @@ struct fw_version {
#define __FW_VERSION_RESERVED_SHIFT 4
};
+/* GRE RSS Mode */
+enum gre_rss_mode {
+ GRE_OUTER_HEADERS_RSS,
+ GRE_INNER_HEADERS_RSS,
+ NVGRE_KEY_ENTROPY_RSS,
+ MAX_GRE_RSS_MODE
+};
+
+/* GRE Tunnel Mode */
+enum gre_tunnel_type {
+ NO_GRE_TUNNEL,
+ NVGRE_TUNNEL,
+ L2GRE_TUNNEL,
+ IPGRE_TUNNEL,
+ MAX_GRE_TUNNEL_TYPE
+};
/*
* Dynamic Host-Coalescing - Driver(host) counters
@@ -5224,6 +5395,26 @@ enum ip_ver {
MAX_IP_VER
};
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+ VF_PF_CHANNEL_NOT_READY,
+ ETH_ILLEGAL_BD_LENGTHS,
+ ETH_PACKET_TOO_SHORT,
+ ETH_PAYLOAD_TOO_BIG,
+ ETH_ILLEGAL_ETH_TYPE,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_TOO_MANY_BDS,
+ ETH_ZERO_HDR_NBDS,
+ ETH_START_BD_NOT_SET,
+ ETH_ILLEGAL_PARSE_NBDS,
+ ETH_IPV6_AND_CHECKSUM,
+ ETH_VLAN_FLG_INCORRECT,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_TUNNEL_NOT_SUPPORTED,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
/*
* Multi-function modes
@@ -5368,7 +5559,6 @@ struct protocol_common_spe {
union protocol_common_specific_data data;
};
-
/*
* The send queue element
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 77ebae0ac64..e3fa8080739 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -152,6 +152,7 @@
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+ #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
#define SFP_EEPROM_COMP_CODE_ADDR 0x3
@@ -3629,6 +3630,16 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
+#define WC_TX_DRIVER(post2, idriver, ipre) \
+ ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
+ (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
+ (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+
+#define WC_TX_FIR(post, main, pre) \
+ ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
+ (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
+ (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
+
static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
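For reference, the two helpers are pure bit-packing; the first converted call site below expands to exactly the open-coded value it replaces:

/* WC_TX_DRIVER(0x02, 0x06, 0x09) ==
 *	((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
 *	 (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
 *	 (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
 */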
@@ -3753,20 +3764,13 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Set Transmit PMD settings */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
/* Configure the next lane if dual mode */
if (phy->flags & FLAGS_WC_DUAL_MODE)
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
- ((0x02 <<
- MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
0x03f0);
@@ -3909,6 +3913,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
+ u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+
/* Hold rxSeqStart */
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
@@ -3952,23 +3958,33 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
if (is_xfi) {
misc1_val |= 0x5;
- tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
-
+ tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
+ tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
} else {
+ cfg_tap_val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].
+ sfi_tap_values));
+
+ tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
+
+ tx_drv_brdct = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+ PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+
misc1_val |= 0x9;
- tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+ /* TAP values are controlled by nvram, if value there isn't 0 */
+ if (tx_equal)
+ tap_val = (u16)tx_equal;
+ else
+ tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
+
+ if (tx_drv_brdct)
+ tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct,
+ 0x06);
+ else
+ tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
}
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4105,15 +4121,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
/* Set Transmit PMD settings */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX_FIR_TAP,
- ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
- MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ (WC_TX_FIR(0x12, 0x2d, 0x00) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x02, 0x02));
}
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
@@ -8049,20 +8061,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
+ case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
SFP_EEPROM_COMP_CODE_LR_MASK |
SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
- DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ DP(NETIF_MSG_LINK, "1G SFP module detected\n");
gport = params->port;
phy->media_type = ETH_PHY_SFP_1G_FIBER;
- phy->req_line_speed = SPEED_1000;
- if (!CHIP_IS_E1x(bp))
- gport = BP_PATH(bp) + (params->port << 1);
- netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
- " Current SFP module in port %d is not"
- " compliant with 10G Ethernet\n",
- gport);
+ if (phy->req_line_speed != SPEED_1000) {
+ phy->req_line_speed = SPEED_1000;
+ if (!CHIP_IS_E1x(bp)) {
+ gport = BP_PATH(bp) +
+ (params->port << 1);
+ }
+ netdev_err(bp->dev,
+ "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
+ gport);
+ }
} else {
int idx, cfg_idx = 0;
DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -10281,7 +10297,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
bnx2x_eee_an_resolve(phy, params, vars);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e81a747ea8c..4902d1eb3d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -75,8 +75,6 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -2955,14 +2953,15 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
/* tx only connections collect statistics (on the same index as the
- * parent connection). The statistics are zeroed when the parent
- * connection is initialized.
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
*/
__set_bit(BNX2X_Q_FLG_STATS, &flags);
if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
#ifdef BNX2X_STOP_ON_ERROR
__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@ -3227,16 +3226,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
struct eth_stats_info *ether_stat =
&bp->slowpath->drv_info_to_mcp.ether_stat;
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bp->sp_objs->mac_obj;
+ int i;
strlcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
- bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
-
+ /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
+ * mac_local field in ether_stat struct. The base address is offset by 2
+ * bytes to account for the field being 8 bytes but a mac address is
+ * only 6 bytes. Likewise, the stride for the get_n_elements function is
+ * 2 bytes to bridge the gap from the 6 bytes of a mac to the 8 bytes
+ * allocated by the ether_stat struct, so the macs will land in their
+ * proper positions.
+ */
+ for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
+ memset(ether_stat->mac_local + i, 0,
+ sizeof(ether_stat->mac_local[0]));
+ mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local + MAC_PAD, MAC_PAD,
+ ETH_ALEN);
ether_stat->mtu_size = bp->dev->mtu;
-
if (bp->dev->features & NETIF_F_RXCSUM)
ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
if (bp->dev->features & NETIF_F_TSO)
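The padding arithmetic is easier to see laid out: with ETH_ALEN = 6, MAC_PAD = ALIGN(6, sizeof(u32)) - 6 = 2, so each MAC lands in the tail of an 8-byte slot:

/* Illustrative layout of ether_stat->mac_local after the copy:
 *
 *   slot 0: [pad][pad][m0][m1][m2][m3][m4][m5]
 *   slot 1: [pad][pad][m0][m1][m2][m3][m4][m5]
 *   ...
 *
 * get_n_elements() starts writing at base + MAC_PAD and advances by
 * stride + size = 2 + 6 = 8 bytes per element, matching the slots.
 */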
@@ -3258,8 +3270,7 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3361,8 +3372,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
+ ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -9525,6 +9536,10 @@ sp_rtnl_not_reset:
bnx2x_vfpf_storm_rx_mode(bp);
}
+ if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state))
+ bnx2x_pf_set_vfs_vlan(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -9532,8 +9547,10 @@ sp_rtnl_not_reset:
/* enable SR-IOV if applicable */
if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
- &bp->sp_rtnl_state))
+ &bp->sp_rtnl_state)) {
+ bnx2x_disable_sriov(bp);
bnx2x_enable_sriov(bp);
+ }
}
static void bnx2x_period_task(struct work_struct *work)
@@ -10034,8 +10051,12 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
id = ((val & 0xffff) << 16);
val = REG_RD(bp, MISC_REG_CHIP_REV);
id |= ((val & 0xf) << 12);
- val = REG_RD(bp, MISC_REG_CHIP_METAL);
- id |= ((val & 0xff) << 4);
+
+ /* Metal is read from PCI regs, but we can't access >=0x400 from
+ * the configuration space (so we need to reg_rd)
+ */
+ val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
+ id |= (((val >> 24) & 0xf) << 4);
val = REG_RD(bp, MISC_REG_BOND_ID);
id |= (val & 0xf);
bp->common.chip_id = id;
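Assembled, the chip id packs four fields; the only behavioural change here is sourcing the metal revision from the PCI config shadow register instead of the removed MISC_REG_CHIP_METAL:

/* chip_id composition after this change:
 *
 *   id = (chip_num & 0xffff) << 16	from MISC_REG_CHIP_NUM
 *      | (chip_rev & 0xf)    << 12	from MISC_REG_CHIP_REV
 *      | (metal    & 0xf)    << 4	from PCI_ID_VAL3 bits 27:24
 *      | (bond_id  & 0xf)		from MISC_REG_BOND_ID
 */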
@@ -10812,14 +10833,12 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
}
}
- if (IS_MF_STORAGE_SD(bp))
- /* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
- if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
- /* use FIP MAC as primary MAC */
+ /* If this is a storage-only interface, use SAN mac as
+ * primary MAC. Notice that for SD this is already the case,
+ * as the SAN mac was copied from the primary MAC.
+ */
+ if (IS_MF_FCOE_AFEX(bp))
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
-
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -11056,6 +11075,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+ bp->mf_config[vn] = 0;
+ break;
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
@@ -11402,26 +11424,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* net_device service functions
*/
-static int bnx2x_open_epilog(struct bnx2x *bp)
-{
- /* Enable sriov via delayed work. This must be done via delayed work
- * because it causes the probe of the vf devices to be run, which invoke
- * register_netdevice which must have rtnl lock taken. As we are holding
- * the lock right now, that could only work if the probe would not take
- * the lock. However, as the probe of the vf may be called from other
- * contexts as well (such as passthrough to vm failes) it can't assume
- * the lock is being held for it. Using delayed work here allows the
- * probe code to simply take the lock (i.e. wait for it to be released
- * if it is being held).
- */
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
- return 0;
-}
-
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
@@ -11791,6 +11793,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_setup_tc = bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
+ .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
+ .ndo_get_vf_config = bnx2x_get_vf_config,
#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
@@ -11953,7 +11957,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
- bnx2x_set_ethtool_ops(dev);
+ bnx2x_set_ethtool_ops(bp, dev);
dev->priv_flags |= IFF_UNICAST_FLT;
@@ -11961,6 +11965,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+ if (!CHIP_IS_E1x(bp)) {
+ dev->hw_features |= NETIF_F_GSO_GRE;
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE;
+ }
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
@@ -12475,13 +12486,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
goto init_one_exit;
}
- /* Enable SRIOV if capability found in configuration space.
- * Once the generic SR-IOV framework makes it in from the
- * pci tree this will be revised, to allow dynamic control
- * over the number of VFs. Right now, change the num of vfs
- * param below to enable SR-IOV.
- */
- rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+ /* Enable SRIOV if capability found in configuration space */
+ rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
goto init_one_exit;
@@ -12493,16 +12499,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
- /* disable FCOE for 57840 device, until FW supports it */
- switch (ent->driver_data) {
- case BCM57840_O:
- case BCM57840_4_10:
- case BCM57840_2_20:
- case BCM57840_MFO:
- case BCM57840_MF:
- bp->flags |= NO_FCOE_FLAG;
- }
-
/* Set bp->num_queues for MSI-X mode*/
bnx2x_set_num_queues(bp);
@@ -12797,6 +12793,9 @@ static struct pci_driver bnx2x_pci_driver = {
.suspend = bnx2x_suspend,
.resume = bnx2x_resume,
.err_handler = &bnx2x_err_handler,
+#ifdef CONFIG_BNX2X_SRIOV
+ .sriov_configure = bnx2x_sriov_configure,
+#endif
};
static int __init bnx2x_init(void)
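With the .sriov_configure hook registered, the VF count is driven at runtime through the PCI core's standard sysfs attribute rather than a probe-time parameter; a usage note (device path illustrative):

/* Usage (illustrative):
 *
 *	echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 *
 * invokes bnx2x_sriov_configure(dev, 4), which clamps the request to
 * the total_vfs capability and enables SR-IOV; writing 0 disables it.
 */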
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 791eb2d5301..d22bc40091e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1491,10 +1491,6 @@
/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
Port. */
#define MISC_REG_BOND_ID 0xa400
-/* [R 8] These bits indicate the metal revision of the chip. This value
- starts at 0x00 for each all-layer tape-out and increments by one for each
- tape-out. */
-#define MISC_REG_CHIP_METAL 0xa404
/* [R 16] These bits indicate the part number for the chip. */
#define MISC_REG_CHIP_NUM 0xa408
/* [R 4] These bits indicate the base revision of the chip. This value
@@ -6331,6 +6327,8 @@
#define PCI_PM_DATA_B 0x414
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
+#define PCI_ID_VAL3 0x43c
+
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7306416bc90..5bdc1d6dcf8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,8 +30,6 @@
#define BNX2X_MAX_EMUL_MULTI 16
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/**** Exe Queue interfaces ****/
/**
@@ -444,30 +442,21 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
}
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf)
+ int n, u8 *base, u8 stride, u8 size)
{
struct bnx2x_vlan_mac_registry_elem *pos;
- u8 *next = buf;
+ u8 *next = base;
int counter = 0;
/* traverse list */
list_for_each_entry(pos, &o->head, link) {
if (counter < n) {
- /* place leading zeroes in buffer */
- memset(next, 0, MAC_LEADING_ZERO_CNT);
-
- /* place mac after leading zeroes*/
- memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
- ETH_ALEN);
-
- /* calculate address of next element and
- * advance counter
- */
+ memcpy(next, &pos->u, size);
counter++;
- next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
+ counter, next);
+ next += stride + size;
- DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
- counter, next, pos->u.mac.mac);
}
}
return counter * ETH_ALEN;
@@ -487,7 +476,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
return 0;
@@ -520,7 +510,9 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
return -EEXIST;
return 0;
@@ -538,7 +530,8 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
return NULL;
@@ -573,7 +566,9 @@ static struct bnx2x_vlan_mac_registry_elem *
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
return pos;
return NULL;
@@ -770,6 +765,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
@@ -786,6 +783,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.
+ u.mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -974,7 +974,8 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
-
+ rule_entry->pair.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
rule_entry++;
@@ -991,6 +992,9 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
+ rule_entry->pair.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.
+ vlan_mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -1854,6 +1858,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
return rc;
}
list_del(&exeq_pos->link);
+ bnx2x_exe_queue_free_elem(bp, exeq_pos);
}
}
@@ -2012,6 +2017,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
vlan_obj->check_move = bnx2x_check_move;
vlan_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ vlan_obj->get_n_elements = bnx2x_get_n_elements;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
@@ -4426,6 +4432,10 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
tx_data->force_default_pri_flg =
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+ tx_data->tunnel_non_lso_pcsum_location =
+ test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
+ PCSUM_ON_BD;
+
tx_data->tx_status_block_id = params->fw_sb_id;
tx_data->tx_sb_index_number = params->sb_cq_index;
tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5669,17 +5679,18 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = (u8)start_params->mf_mode;
- rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
- rdata->path_id = BP_PATH(bp);
- rdata->network_cos_mode = start_params->network_cos_mode;
-
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ rdata->function_mode = (u8)start_params->mf_mode;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
+ rdata->path_id = BP_PATH(bp);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+ rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
+ rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside bnx2x_sp_post()).
*/
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index ff907609b9f..35479da11f8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -100,6 +100,7 @@ struct bnx2x_raw_obj {
/************************* VLAN-MAC commands related parameters ***************/
struct bnx2x_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
};
struct bnx2x_vlan_ramrod_data {
@@ -108,6 +109,7 @@ struct bnx2x_vlan_ramrod_data {
struct bnx2x_vlan_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
u16 vlan;
};
@@ -313,8 +315,9 @@ struct bnx2x_vlan_mac_obj {
*
* @return number of copied bytes
*/
- int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf);
+ int (*get_n_elements)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
+ u8 stride, u8 size);
/**
* Checks if ADD-ramrod with the given params may be performed.
@@ -824,7 +827,8 @@ enum {
BNX2X_Q_FLG_TX_SEC,
BNX2X_Q_FLG_ANTI_SPOOF,
BNX2X_Q_FLG_SILENT_VLAN_REM,
- BNX2X_Q_FLG_FORCE_DEFAULT_PRI
+ BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+ BNX2X_Q_FLG_PCSUM_ON_PKT
};
/* Queue type options: queue type may be a compination of below. */
@@ -842,6 +846,7 @@ enum bnx2x_q_type {
#define BNX2X_MULTI_TX_COS_E3B0 3
#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
+#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
struct bnx2x_queue_init_params {
struct {
@@ -1118,6 +1123,15 @@ struct bnx2x_func_start_params {
/* Function cos mode */
u8 network_cos_mode;
+
+ /* NVGRE classification enablement */
+ u8 nvgre_clss_en;
+
+ /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+ u8 gre_tunnel_mode;
+
+ /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
+ u8 gre_tunnel_rss;
};
struct bnx2x_func_switch_update_params {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6adfa209358..faadd153f7a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -20,7 +20,9 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
#include <linux/crc32.h>
+#include <linux/if_vlan.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
@@ -555,8 +557,7 @@ static int bnx2x_vfop_config_list(struct bnx2x *bp,
rc = bnx2x_config_vlan_mac(bp, vlan_mac);
if (rc >= 0) {
cnt += pos->add ? 1 : -1;
- list_del(&pos->link);
- list_add(&pos->link, &rollback_list);
+ list_move(&pos->link, &rollback_list);
rc = 0;
} else if (rc == -EEXIST) {
rc = 0;
@@ -958,6 +959,12 @@ op_err:
BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
case BNX2X_VFOP_QSETUP_DONE:
+ vf->cfg_flags |= VF_CFG_VLAN;
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
@@ -1459,7 +1466,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
return bnx2x_is_pcie_pending(dev);
unknown_dev:
- BNX2X_ERR("Unknown device\n");
return false;
}
@@ -1964,8 +1970,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
if (iov->total == 0)
goto failed;
- /* calculate the actual number of VFs */
- iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
+ iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
+
+ DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
+ num_vfs_param, iov->nr_virtfn);
/* allocate the vf array */
bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
@@ -3012,21 +3020,138 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->op_current = CHANNEL_TLV_NONE;
}
-void bnx2x_enable_sriov(struct bnx2x *bp)
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
- int rc = 0;
- /* disbale sriov in case it is still enabled */
+ struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+
+ DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+
+ /* HW channel is only operational when PF is up */
+ if (bp->state != BNX2X_STATE_OPEN) {
+ BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down");
+ return -EINVAL;
+ }
+
+ /* we are always bound by the total_vfs in the configuration space */
+ if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+ num_vfs_param = BNX2X_NR_VIRTFN(bp);
+ }
+
+ bp->requested_nr_virtfn = num_vfs_param;
+ if (num_vfs_param == 0) {
+ pci_disable_sriov(dev);
+ return 0;
+ } else {
+ return bnx2x_enable_sriov(bp);
+ }
+}
+
+int bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0, req_vfs = bp->requested_nr_virtfn;
+
+ rc = pci_enable_sriov(bp->pdev, req_vfs);
+ if (rc) {
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ return rc;
+ }
+ DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
+ return req_vfs;
+}
+
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
+{
+ int vfidx;
+ struct pf_vf_bulletin_content *bulletin;
+
+ DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
+ for_each_vf(bp, vfidx) {
+ bulletin = BP_VF_BULLETIN(bp, vfidx);
+ if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+ }
+}
+
+void bnx2x_disable_sriov(struct bnx2x *bp)
+{
pci_disable_sriov(bp->pdev);
- DP(BNX2X_MSG_IOV, "sriov disabled\n");
+}
- /* enable sriov */
- DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
- rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
+static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf *vf)
+{
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("vf ndo called though sriov is disabled\n");
+ return -EINVAL;
+ }
+
+ if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
+ vfidx, BNX2X_NR_VIRTFN(bp));
+ return -EINVAL;
+ }
+
+ if (!vf) {
+ BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ int rc;
+
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
if (rc)
- BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
- else
- DP(BNX2X_MSG_IOV, "sriov enabled\n");
+ return rc;
+ if (!mac_obj || !vlan_obj || !bulletin) {
+ BNX2X_ERR("VF partially initialized\n");
+ return -EINVAL;
+ }
+
+ ivi->vf = vfidx;
+ ivi->qos = 0;
+ ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->spoofchk = 1; /* always enabled */
+ if (vf->state == VF_ENABLED) {
+ /* mac and vlan are in vlan_mac objects */
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
+ 0, VLAN_HLEN);
+ } else {
+ /* mac */
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
+ /* mac configured by ndo so it's in the bulletin board */
+ memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
+ else
+ /* function has not been loaded yet. Show mac as 0s */
+ memset(&ivi->mac, 0, ETH_ALEN);
+
+ /* vlan */
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
+ /* vlan configured by ndo so it's in the bulletin board */
+ memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ else
+ /* function has not been loaded yet. Show vlans as 0s */
+ memset(&ivi->vlan, 0, VLAN_HLEN);
+ }
+
+ return 0;
}
/* New mac for VF. Consider these cases:
@@ -3044,23 +3169,19 @@ void bnx2x_enable_sriov(struct bnx2x *bp)
* VF to configure any mac for itself except for this mac. In case of a race
* where the VF fails to see the new post on its bulletin board before sending a
* mac configuration request, the PF will simply fail the request and VF can try
- * again after consulting its bulletin board
+ * again after consulting its bulletin board.
*/
-int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state, vfidx = queue;
+ int rc, q_logical_state;
struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
- /* if SRIOV is disabled there is nothing to do (and somewhere, someone
- * has erred).
- */
- if (!IS_SRIOV(bp)) {
- BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
- return -EINVAL;
- }
-
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ if (rc)
+ return rc;
if (!is_valid_ether_addr(mac)) {
BNX2X_ERR("mac address invalid\n");
return -EINVAL;
@@ -3085,7 +3206,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
- unsigned long flags = 0;
+ unsigned long ramrod_flags = 0;
struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
/* must lock vfpf channel to protect against vf flows */
@@ -3106,14 +3227,133 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
}
/* configure the new mac to device */
- __set_bit(RAMROD_COMP_WAIT, &flags);
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
- BNX2X_ETH_MAC, &flags);
+ BNX2X_ETH_MAC, &ramrod_flags);
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
- return rc;
+ return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state;
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ if (rc)
+ return rc;
+
+ if (vlan > 4095) {
+ BNX2X_ERR("illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
+ vfidx, vlan, 0);
+
+ /* update PF's copy of the VF's bulletin. No point in posting the vlan
+ * to the VF since it doesn't have anything to do with it. But it is
+ * useful to store it here in case the VF is not up yet and we can
+ * only configure the vlan later when it comes up.
+ */
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ bulletin->vlan = vlan;
+
+ /* is vf initialized and queue set up? */
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the vlan in device on this vf's queue */
+ unsigned long ramrod_flags = 0;
+ unsigned long vlan_mac_flags = 0;
+ struct bnx2x_vlan_mac_obj *vlan_obj =
+ &bnx2x_vfq(vf, 0, vlan_obj);
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ return -EINVAL;
+ }
+
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure the new vlan to device */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ return -EINVAL;
+ }
+
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ update_params->def_vlan = vlan;
+ }
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ return rc;
+ }
+
+ /* clear the flag indicating that this VF needs its vlan
+ * (will only be set if the HV configured the vlan before the vf was
+ * up and we were called because the VF came up later).
+ */
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ }
+ return 0;
}
/* crc is the first field in the bulletin board. compute the crc over the
@@ -3165,6 +3405,10 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
+ /* the vlan in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << VLAN_VALID)
+ memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
+
/* copy new bulletin board to bp */
bp->old_bulletin = bulletin;
@@ -3196,3 +3440,26 @@ alloc_mem_err:
sizeof(union pf_vf_bulletin));
return -ENOMEM;
}
+
+int bnx2x_open_epilog(struct bnx2x *bp)
+{
+ /* Enable sriov via delayed work. This must be done via delayed work
+ * because it causes the probe of the vf devices to be run, which invoke
+ * register_netdevice which must have rtnl lock taken. As we are holding
+ * the lock right now, that could only work if the probe would not take
+ * the lock. However, as the probe of the vf may be called from other
+ * contexts as well (such as passthrough to vm failes) it can't assume
+ * the lock is being held for it. Using delayed work here allows the
+ * probe code to simply take the lock (i.e. wait for it to be released
+ * if it is being held). We only want to do this if the number of VFs
+ * was set before PF driver was loaded.
+ */
+ if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b4050173add..a10bdb2fd90 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -193,6 +193,7 @@ struct bnx2x_virtf {
#define VF_CFG_TPA 0x0004
#define VF_CFG_INT_SIMD 0x0008
#define VF_CACHE_LINE 0x0010
+#define VF_CFG_VLAN 0x0020
u8 state;
#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -752,11 +753,15 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
void bnx2x_vf_map_doorbells(struct bnx2x *bp);
int bnx2x_vf_pci_alloc(struct bnx2x *bp);
-void bnx2x_enable_sriov(struct bnx2x *bp);
+int bnx2x_enable_sriov(struct bnx2x *bp);
+void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
}
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
+int bnx2x_open_epilog(struct bnx2x *bp);
#else /* CONFIG_BNX2X_SRIOV */
@@ -779,7 +784,8 @@ static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
int num_vfs_param) {return 0; }
static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
-static inline void bnx2x_enable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
u8 tx_count, u8 rx_count) {return 0; }
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
@@ -804,6 +810,9 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp
static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
+static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
+static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index bfc80baec00..41708faab57 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -328,9 +328,15 @@ struct pf_vf_bulletin_content {
#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
* is available for it
*/
+#define VLAN_VALID 1 /* when set, the vf should not access
+ * the vfpf channel
+ */
u8 mac[ETH_ALEN];
- u8 padding[2];
+ u8 mac_padding[2];
+
+ u16 vlan;
+ u8 vlan_padding[6];
};
union pf_vf_bulletin {
@@ -353,6 +359,7 @@ enum channel_tlvs {
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_PF_SET_MAC,
+ CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index e9b35da375c..e80bfb60c3e 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -831,11 +831,8 @@ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
SMP_CACHE_BYTES * 2 +
NET_IP_ALIGN);
- if (sb_new == NULL) {
- pr_info("%s: sk_buff allocation failed\n",
- d->sbdma_eth->sbm_dev->name);
+ if (sb_new == NULL)
return -ENOBUFS;
- }
sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 67d2663b397..dea7d7d1f73 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -212,6 +212,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
#define FIRMWARE_TG3 "tigon/tg3.bin"
+#define FIRMWARE_TG357766 "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
@@ -3448,11 +3449,58 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
#define TX_CPU_SCRATCH_SIZE 0x04000
/* tp->lock is held. */
-static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
int i;
+ const int iters = 10000;
- BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+ for (i = 0; i < iters; i++) {
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+
+ return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
+static int tg3_rxcpu_pause(struct tg3 *tp)
+{
+ int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
+
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
+ udelay(10);
+
+ return rc;
+}
+
+/* tp->lock is held. */
+static int tg3_txcpu_pause(struct tg3 *tp)
+{
+ return tg3_pause_cpu(tp, TX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
+{
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_MODE, 0x00000000);
+}
+
+/* tp->lock is held. */
+static void tg3_rxcpu_resume(struct tg3 *tp)
+{
+ tg3_resume_cpu(tp, RX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
+{
+ int rc;
+
+ BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3460,17 +3508,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
return 0;
}
- if (offset == RX_CPU_BASE) {
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
-
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
- udelay(10);
+ if (cpu_base == RX_CPU_BASE) {
+ rc = tg3_rxcpu_pause(tp);
} else {
/*
* There is only an Rx CPU for the 5750 derivative in the
@@ -3479,17 +3518,12 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
if (tg3_flag(tp, IS_SSB_CORE))
return 0;
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
+ rc = tg3_txcpu_pause(tp);
}
- if (i >= 10000) {
+ if (rc) {
netdev_err(tp->dev, "%s timed out, %s CPU\n",
- __func__, offset == RX_CPU_BASE ? "RX" : "TX");
+ __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
return -ENODEV;
}
@@ -3499,19 +3533,41 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
return 0;
}
-struct fw_info {
- unsigned int fw_base;
- unsigned int fw_len;
- const __be32 *fw_data;
-};
+static int tg3_fw_data_len(struct tg3 *tp,
+ const struct tg3_firmware_hdr *fw_hdr)
+{
+ int fw_len;
+
+	/* Non-fragmented firmware has one firmware header followed by a
+	 * contiguous chunk of data to be written. The length field in that
+	 * header is not the length of the data to be written but the complete
+	 * length of the BSS. The data length is determined from
+	 * tp->fw->size minus the headers.
+	 *
+	 * Fragmented firmware has a main header followed by multiple
+	 * fragments. Each fragment is identical to non-fragmented firmware,
+	 * with a firmware header followed by a contiguous chunk of data. In
+	 * the main header, the length field is unused and set to 0xffffffff.
+	 * In each fragment header the length is the entire size of that
+	 * fragment, i.e. fragment data plus header length. The data length
+	 * is therefore the header's length field minus TG3_FW_HDR_LEN.
+	 */
+ if (tp->fw_len == 0xffffffff)
+ fw_len = be32_to_cpu(fw_hdr->len);
+ else
+ fw_len = tp->fw->size;
+
+ return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
+}
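
To make the two cases above concrete, a worked example with illustrative numbers (not taken from a real blob):

	/* Worked example (illustrative numbers only):
	 *   non-fragmented: tp->fw->size = 4096, so
	 *     (4096 - TG3_FW_HDR_LEN) / 4 = (4096 - 12) / 4 = 1021 words;
	 *     fw_hdr->len holds the larger BSS size and is ignored here.
	 *   fragmented (tp->fw_len == 0xffffffff): a fragment with
	 *     fw_hdr->len = 524 yields (524 - 12) / 4 = 128 words.
	 */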
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
u32 cpu_scratch_base, int cpu_scratch_size,
- struct fw_info *info)
+ const struct tg3_firmware_hdr *fw_hdr)
{
- int err, lock_err, i;
+ int err, i;
void (*write_op)(struct tg3 *, u32, u32);
+ int total_len = tp->fw->size;
if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
netdev_err(tp->dev,
@@ -3520,30 +3576,49 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
return -EINVAL;
}
- if (tg3_flag(tp, 5705_PLUS))
+ if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
write_op = tg3_write_mem;
else
write_op = tg3_write_indirect_reg32;
- /* It is possible that bootcode is still loading at this point.
- * Get the nvram lock first before halting the cpu.
- */
- lock_err = tg3_nvram_lock(tp);
- err = tg3_halt_cpu(tp, cpu_base);
- if (!lock_err)
- tg3_nvram_unlock(tp);
- if (err)
- goto out;
+ if (tg3_asic_rev(tp) != ASIC_REV_57766) {
+ /* It is possible that bootcode is still loading at this point.
+ * Get the nvram lock first before halting the cpu.
+ */
+ int lock_err = tg3_nvram_lock(tp);
+ err = tg3_halt_cpu(tp, cpu_base);
+ if (!lock_err)
+ tg3_nvram_unlock(tp);
+ if (err)
+ goto out;
- for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
- write_op(tp, cpu_scratch_base + i, 0);
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
- for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
- write_op(tp, (cpu_scratch_base +
- (info->fw_base & 0xffff) +
- (i * sizeof(u32))),
- be32_to_cpu(info->fw_data[i]));
+ for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+ write_op(tp, cpu_scratch_base + i, 0);
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE,
+ tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
+ } else {
+		/* Subtract the additional main header for fragmented
+		 * firmware and advance to the first fragment.
+		 */
+ total_len -= TG3_FW_HDR_LEN;
+ fw_hdr++;
+ }
+
+ do {
+ u32 *fw_data = (u32 *)(fw_hdr + 1);
+ for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
+ write_op(tp, cpu_scratch_base +
+ (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
+ (i * sizeof(u32)),
+ be32_to_cpu(fw_data[i]));
+
+ total_len -= be32_to_cpu(fw_hdr->len);
+
+ /* Advance to next fragment */
+ fw_hdr = (struct tg3_firmware_hdr *)
+ ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
+ } while (total_len > 0);
err = 0;
@@ -3552,13 +3627,33 @@ out:
}
/* tp->lock is held. */
+static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
+{
+ int i;
+ const int iters = 5;
+
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_PC, pc);
+
+ for (i = 0; i < iters; i++) {
+ if (tr32(cpu_base + CPU_PC) == pc)
+ break;
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(cpu_base + CPU_PC, pc);
+ udelay(1000);
+ }
+
+ return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
- struct fw_info info;
- const __be32 *fw_data;
- int err, i;
+ const struct tg3_firmware_hdr *fw_hdr;
+ int err;
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
start address and length. We are setting complete length.
@@ -3566,60 +3661,117 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
Remainder is the blob to be loaded contiguously
from start address. */
- info.fw_base = be32_to_cpu(fw_data[1]);
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
-
err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
- &info);
+ fw_hdr);
if (err)
return err;
err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
- &info);
+ fw_hdr);
if (err)
return err;
/* Now startup only the RX cpu. */
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
- break;
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
+ err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
+ be32_to_cpu(fw_hdr->base_addr));
+ if (err) {
netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
"should be %08x\n", __func__,
- tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
+ tr32(RX_CPU_BASE + CPU_PC),
+ be32_to_cpu(fw_hdr->base_addr));
return -ENODEV;
}
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
+
+ tg3_rxcpu_resume(tp);
+
+ return 0;
+}
+
+static int tg3_validate_rxcpu_state(struct tg3 *tp)
+{
+ const int iters = 1000;
+ int i;
+ u32 val;
+
+	/* Wait for the boot code to complete initialization and enter its
+	 * service loop. It is then safe to download service patches.
+	 */
+ for (i = 0; i < iters; i++) {
+ if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
+ break;
+
+ udelay(10);
+ }
+
+ if (i == iters) {
+ netdev_err(tp->dev, "Boot code not ready for service patches\n");
+ return -EBUSY;
+ }
+
+ val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
+ if (val & 0xff) {
+ netdev_warn(tp->dev,
+ "Other patches exist. Not downloading EEE patch\n");
+ return -EEXIST;
+ }
return 0;
}
/* tp->lock is held. */
+static void tg3_load_57766_firmware(struct tg3 *tp)
+{
+ struct tg3_firmware_hdr *fw_hdr;
+
+ if (!tg3_flag(tp, NO_NVRAM))
+ return;
+
+ if (tg3_validate_rxcpu_state(tp))
+ return;
+
+ if (!tp->fw)
+ return;
+
+	/* This firmware blob has a different format from older firmware
+	 * releases, as described below. The main difference is that the data
+	 * is fragmented and written to non-contiguous locations.
+	 *
+	 * The blob begins with a firmware header identical to other firmware,
+	 * consisting of version, base addr and length. The length here is
+	 * unused and set to 0xffffffff.
+	 *
+	 * This is followed by a series of firmware fragments, each
+	 * individually identical to older firmware, i.e. a firmware header
+	 * followed by the data for that fragment. The version field of each
+	 * fragment header is unused.
+	 */
+
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
+ if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
+ return;
+
+ if (tg3_rxcpu_pause(tp))
+ return;
+
+ /* tg3_load_firmware_cpu() will always succeed for the 57766 */
+ tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
+
+ tg3_rxcpu_resume(tp);
+}
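
Schematically, the blob layout described above (each header is a struct tg3_firmware_hdr):

	/* [ main hdr : version | base_addr | len = 0xffffffff ]
	 * [ frag hdr : version (unused) | base_addr | len ][ fragment data ]
	 * [ frag hdr : version (unused) | base_addr | len ][ fragment data ]
	 *   ...
	 * Each fragment's len covers header plus data, so the loop in
	 * tg3_load_firmware_cpu() advances by be32_to_cpu(fw_hdr->len) per
	 * iteration until total_len is exhausted.
	 */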
+
+/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
- struct fw_info info;
- const __be32 *fw_data;
+ const struct tg3_firmware_hdr *fw_hdr;
unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
- int err, i;
+ int err;
- if (tg3_flag(tp, HW_TSO_1) ||
- tg3_flag(tp, HW_TSO_2) ||
- tg3_flag(tp, HW_TSO_3))
+ if (!tg3_flag(tp, FW_TSO))
return 0;
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
start address and length. We are setting complete length.
@@ -3627,10 +3779,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
Remainder is the blob to be loaded contiguously
from start address. */
- info.fw_base = be32_to_cpu(fw_data[1]);
cpu_scratch_size = tp->fw_len;
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
if (tg3_asic_rev(tp) == ASIC_REV_5705) {
cpu_base = RX_CPU_BASE;
@@ -3643,30 +3792,22 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
err = tg3_load_firmware_cpu(tp, cpu_base,
cpu_scratch_base, cpu_scratch_size,
- &info);
+ fw_hdr);
if (err)
return err;
/* Now startup the cpu. */
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(cpu_base + CPU_PC) == info.fw_base)
- break;
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
+ err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
+ be32_to_cpu(fw_hdr->base_addr));
+ if (err) {
netdev_err(tp->dev,
"%s fails to set CPU PC, is %08x should be %08x\n",
- __func__, tr32(cpu_base + CPU_PC), info.fw_base);
+ __func__, tr32(cpu_base + CPU_PC),
+ be32_to_cpu(fw_hdr->base_addr));
return -ENODEV;
}
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_MODE, 0x00000000);
+
+ tg3_resume_cpu(tp, cpu_base);
return 0;
}
@@ -8039,11 +8180,9 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
TG3_RX_RCB_RING_BYTES(tp),
&tnapi->rx_rcb_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tnapi->rx_rcb)
goto err_out;
-
- memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
}
return 0;
@@ -8093,12 +8232,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
sizeof(struct tg3_hw_stats),
&tp->stats_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tp->hw_stats)
goto err_out;
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
-
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
@@ -8106,11 +8243,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
TG3_HW_STATUS_SIZE,
&tnapi->status_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tnapi->hw_status)
goto err_out;
- memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tnapi->hw_status;
if (tg3_flag(tp, ENABLE_RSS)) {
@@ -9781,6 +9917,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return err;
}
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+		/* Ignore any errors from the firmware download. If the
+		 * download fails, the device will operate with EEE disabled.
+		 */
+ tg3_load_57766_firmware(tp);
+ }
+
if (tg3_flag(tp, TSO_CAPABLE)) {
err = tg3_load_tso_firmware(tp);
if (err)
@@ -10570,7 +10713,7 @@ static int tg3_test_msi(struct tg3 *tp)
static int tg3_request_firmware(struct tg3 *tp)
{
- const __be32 *fw_data;
+ const struct tg3_firmware_hdr *fw_hdr;
if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
@@ -10578,15 +10721,15 @@ static int tg3_request_firmware(struct tg3 *tp)
return -ENOENT;
}
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
* start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
*/
- tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
- if (tp->fw_len < (tp->fw->size - 12)) {
+ tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
+ if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
tp->fw_len, tp->fw_needed);
release_firmware(tp->fw);
@@ -10885,7 +11028,15 @@ static int tg3_open(struct net_device *dev)
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
- if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ if (err) {
+ netdev_warn(tp->dev, "EEE capability disabled\n");
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+ } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+ netdev_warn(tp->dev, "EEE capability restored\n");
+ tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+ }
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
if (err)
return err;
} else if (err) {
@@ -14515,6 +14666,7 @@ static int tg3_phy_probe(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
(tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_57766 ||
tg3_asic_rev(tp) == ASIC_REV_5762 ||
(tg3_asic_rev(tp) == ASIC_REV_5717 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
@@ -15297,7 +15449,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
tg3_asic_rev(tp) != ASIC_REV_5701 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
- tg3_flag_set(tp, TSO_BUG);
+ tg3_flag_set(tp, FW_TSO);
+ tg3_flag_set(tp, TSO_BUG);
if (tg3_asic_rev(tp) == ASIC_REV_5705)
tp->fw_needed = FIRMWARE_TG3TSO5;
else
@@ -15308,7 +15461,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3) ||
- tp->fw_needed) {
+ tg3_flag(tp, FW_TSO)) {
/* For firmware TSO, assume ASF is disabled.
* We'll disable TSO later if we discover ASF
* is enabled in tg3_get_eeprom_hw_cfg().
@@ -15323,6 +15476,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
tp->fw_needed = FIRMWARE_TG3;
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ tp->fw_needed = FIRMWARE_TG357766;
+
tp->irq_max = 1;
if (tg3_flag(tp, 5750_PLUS)) {
@@ -15595,7 +15751,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
*/
tg3_get_eeprom_hw_cfg(tp);
- if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+ if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
tg3_flag_clear(tp, TSO_CAPABLE);
tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
@@ -15783,6 +15939,11 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
udelay(50);
tg3_nvram_init(tp);
+ /* If the device has an NVRAM, no need to load patch firmware */
+ if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
+ !tg3_flag(tp, NO_NVRAM))
+ tp->fw_needed = NULL;
+
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 8d7d4c2ab5d..1cdc1b641c7 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2222,6 +2222,12 @@
#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
+#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000
+#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000
+#define TG3_57766_FW_BASE_ADDR 0x00030000
+#define TG3_57766_FW_HANDSHAKE 0x0003fccc
+#define TG3_SBROM_IN_SERVICE_LOOP 0x51
+
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
@@ -3009,17 +3015,18 @@ enum TG3_FLAGS {
TG3_FLAG_JUMBO_CAPABLE,
TG3_FLAG_CHIP_RESETTING,
TG3_FLAG_INIT_COMPLETE,
- TG3_FLAG_TSO_BUG,
TG3_FLAG_MAX_RXPEND_64,
- TG3_FLAG_TSO_CAPABLE,
TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
TG3_FLAG_ASF_NEW_HANDSHAKE,
TG3_FLAG_HW_AUTONEG,
TG3_FLAG_IS_NIC,
TG3_FLAG_FLASH,
+ TG3_FLAG_FW_TSO,
TG3_FLAG_HW_TSO_1,
TG3_FLAG_HW_TSO_2,
TG3_FLAG_HW_TSO_3,
+ TG3_FLAG_TSO_CAPABLE,
+ TG3_FLAG_TSO_BUG,
TG3_FLAG_ICH_WORKAROUND,
TG3_FLAG_1SHOT_MSI,
TG3_FLAG_NO_FWARE_REPORTED,
@@ -3064,6 +3071,13 @@ enum TG3_FLAGS {
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
};
+struct tg3_firmware_hdr {
+ __be32 version; /* unused for fragments */
+ __be32 base_addr;
+ __be32 len;
+};
+#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr))
+
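
A minimal sketch of parsing this 12-byte header, mirroring the validation in tg3_request_firmware(); all fields are big-endian, and this is a fragment, not a complete function:

	const struct tg3_firmware_hdr *hdr =
		(const struct tg3_firmware_hdr *)tp->fw->data;
	u32 base = be32_to_cpu(hdr->base_addr);	/* load address in NIC memory */
	u32 len  = be32_to_cpu(hdr->len);	/* full length, including BSS */

	/* The declared length must cover everything after the header */
	if (len < tp->fw->size - TG3_FW_HDR_LEN)
		return -EINVAL;	/* bogus length */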
struct tg3 {
/* begin "general, frequently-used members" cacheline section */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 3227fdde521..f2b73ffa912 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -76,7 +76,7 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
-static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7cce42dc2f2..d588f842d55 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1264,9 +1264,8 @@ bnad_mem_alloc(struct bnad *bnad,
mem_info->mdl[i].len = mem_info->len;
mem_info->mdl[i].kva =
dma_alloc_coherent(&bnad->pcidev->dev,
- mem_info->len, &dma_pa,
- GFP_KERNEL);
-
+ mem_info->len, &dma_pa,
+ GFP_KERNEL);
if (mem_info->mdl[i].kva == NULL)
goto err_return;
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 3becdb2deb4..c6e40d65a3d 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -47,22 +47,19 @@ static int at91ether_start(struct net_device *dev)
int i;
lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * sizeof(struct macb_dma_desc),
- &lp->rx_ring_dma, GFP_KERNEL);
- if (!lp->rx_ring) {
- netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
+ (MAX_RX_DESCR *
+ sizeof(struct macb_dma_desc)),
+ &lp->rx_ring_dma, GFP_KERNEL);
+ if (!lp->rx_ring)
return -ENOMEM;
- }
lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * MAX_RBUFF_SZ,
- &lp->rx_buffers_dma, GFP_KERNEL);
+ MAX_RX_DESCR * MAX_RBUFF_SZ,
+ &lp->rx_buffers_dma, GFP_KERNEL);
if (!lp->rx_buffers) {
- netdev_err(dev, "unable to alloc rx data DMA buffer\n");
-
dma_free_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * sizeof(struct macb_dma_desc),
- lp->rx_ring, lp->rx_ring_dma);
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
return -ENOMEM;
}
@@ -209,7 +206,6 @@ static void at91ether_rx(struct net_device *dev)
netif_rx(skb);
} else {
lp->stats.rx_dropped++;
- netdev_notice(dev, "Memory squeeze, dropping packet.\n");
}
if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
@@ -519,18 +515,7 @@ static struct platform_driver at91ether_driver = {
},
};
-static int __init at91ether_init(void)
-{
- return platform_driver_probe(&at91ether_driver, at91ether_probe);
-}
-
-static void __exit at91ether_exit(void)
-{
- platform_driver_unregister(&at91ether_driver);
-}
-
-module_init(at91ether_init)
-module_exit(at91ether_exit)
+module_platform_driver_probe(at91ether_driver, at91ether_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 79039439bfd..3a5d680ff8f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1737,18 +1737,7 @@ static struct platform_driver macb_driver = {
},
};
-static int __init macb_init(void)
-{
- return platform_driver_probe(&macb_driver, macb_probe);
-}
-
-static void __exit macb_exit(void)
-{
- platform_driver_unregister(&macb_driver);
-}
-
-module_init(macb_init);
-module_exit(macb_exit);
+module_platform_driver_probe(macb_driver, macb_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6db997c78a5..681804b30a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -54,6 +54,10 @@
#define FW_VERSION_MINOR 1
#define FW_VERSION_MICRO 0
+#define FW_VERSION_MAJOR_T5 0
+#define FW_VERSION_MINOR_T5 0
+#define FW_VERSION_MICRO_T5 0
+
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
enum {
@@ -66,7 +70,9 @@ enum {
enum {
MEM_EDC0,
MEM_EDC1,
- MEM_MC
+ MEM_MC,
+ MEM_MC0 = MEM_MC,
+ MEM_MC1
};
enum {
@@ -74,8 +80,10 @@ enum {
MEMWIN0_BASE = 0x1b800,
MEMWIN1_APERTURE = 32768,
MEMWIN1_BASE = 0x28000,
+ MEMWIN1_BASE_T5 = 0x52000,
MEMWIN2_APERTURE = 65536,
MEMWIN2_BASE = 0x30000,
+ MEMWIN2_BASE_T5 = 0x54000,
};
enum dev_master {
@@ -431,6 +439,7 @@ struct sge_txq {
spinlock_t db_lock;
int db_disabled;
unsigned short db_pidx;
+ u64 udb;
};
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
@@ -504,13 +513,44 @@ struct sge {
struct l2t_data;
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A3,
+
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_FIRST_REV = T5_A1,
+ T5_LAST_REV = T5_A1,
+};
+
+#ifdef CONFIG_PCI_IOV
+
+/* T4 supports SR-IOV on PF0-3 and T5 on PF0-7. However, the Serial
+ * Configuration initialization for T5 only enables SR-IOV functionality
+ * on PF0-3, in order to simplify everything.
+ */
+#define NUM_OF_PF_WITH_SRIOV 4
+
+#endif
+
struct adapter {
void __iomem *regs;
+ void __iomem *bar2;
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned int mbox;
unsigned int fn;
unsigned int flags;
+ enum chip_type chip;
int msg_enable;
@@ -673,6 +713,16 @@ enum {
VLAN_REWRITE
};
+static inline int is_t5(enum chip_type chip)
+{
+ return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV);
+}
+
+static inline int is_t4(enum chip_type chip)
+{
+ return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+}
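
To illustrate the encoding above: T4_A2 packs to (0x4 << 4) | 1 = 0x41, so version and release decode back out as 0x4 and 1. Compile-time assertions such as the following (a sketch using the kernel's BUILD_BUG_ON, not part of the patch) would hold:

	BUILD_BUG_ON(CHELSIO_CHIP_CODE(CHELSIO_T5, 0) != 0x50);
	BUILD_BUG_ON(CHELSIO_CHIP_VERSION(T4_A2) != CHELSIO_T4);
	BUILD_BUG_ON(CHELSIO_CHIP_RELEASE(T4_A2) != 1);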
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -858,7 +908,8 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags);
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
+ u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e707e31abd8..e76cf035100 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -68,8 +68,8 @@
#include "t4fw_api.h"
#include "l2t.h"
-#define DRV_VERSION "1.3.0-ko"
-#define DRV_DESC "Chelsio T4 Network Driver"
+#define DRV_VERSION "2.0.0-ko"
+#define DRV_DESC "Chelsio T4/T5 Network Driver"
/*
* Max interrupt hold-off timer value in us. Queues fall back to this value
@@ -229,11 +229,51 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x440a, 4),
CH_DEVICE(0x440d, 4),
CH_DEVICE(0x440e, 4),
+ CH_DEVICE(0x5001, 5),
+ CH_DEVICE(0x5002, 5),
+ CH_DEVICE(0x5003, 5),
+ CH_DEVICE(0x5004, 5),
+ CH_DEVICE(0x5005, 5),
+ CH_DEVICE(0x5006, 5),
+ CH_DEVICE(0x5007, 5),
+ CH_DEVICE(0x5008, 5),
+ CH_DEVICE(0x5009, 5),
+ CH_DEVICE(0x500A, 5),
+ CH_DEVICE(0x500B, 5),
+ CH_DEVICE(0x500C, 5),
+ CH_DEVICE(0x500D, 5),
+ CH_DEVICE(0x500E, 5),
+ CH_DEVICE(0x500F, 5),
+ CH_DEVICE(0x5010, 5),
+ CH_DEVICE(0x5011, 5),
+ CH_DEVICE(0x5012, 5),
+ CH_DEVICE(0x5013, 5),
+ CH_DEVICE(0x5401, 5),
+ CH_DEVICE(0x5402, 5),
+ CH_DEVICE(0x5403, 5),
+ CH_DEVICE(0x5404, 5),
+ CH_DEVICE(0x5405, 5),
+ CH_DEVICE(0x5406, 5),
+ CH_DEVICE(0x5407, 5),
+ CH_DEVICE(0x5408, 5),
+ CH_DEVICE(0x5409, 5),
+ CH_DEVICE(0x540A, 5),
+ CH_DEVICE(0x540B, 5),
+ CH_DEVICE(0x540C, 5),
+ CH_DEVICE(0x540D, 5),
+ CH_DEVICE(0x540E, 5),
+ CH_DEVICE(0x540F, 5),
+ CH_DEVICE(0x5410, 5),
+ CH_DEVICE(0x5411, 5),
+ CH_DEVICE(0x5412, 5),
+ CH_DEVICE(0x5413, 5),
{ 0, }
};
#define FW_FNAME "cxgb4/t4fw.bin"
+#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
+#define FW5_CFNAME "cxgb4/t5-config.txt"
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
@@ -241,6 +281,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE(FW5_FNAME);
/*
* Normally we're willing to become the firmware's Master PF but will be happy
@@ -319,7 +360,10 @@ static bool vf_acls;
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
-static unsigned int num_vf[4];
+/* Configure the number of PCI-E Virtual Functions which are to be
+ * instantiated on SR-IOV Capable Physical Functions.
+ */
+static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
@@ -1002,21 +1046,36 @@ freeout: t4_free_sge_resources(adap);
static int upgrade_fw(struct adapter *adap)
{
int ret;
- u32 vers;
+ u32 vers, exp_major;
const struct fw_hdr *hdr;
const struct firmware *fw;
struct device *dev = adap->pdev_dev;
+ char *fw_file_name;
- ret = request_firmware(&fw, FW_FNAME, dev);
+ switch (CHELSIO_CHIP_VERSION(adap->chip)) {
+ case CHELSIO_T4:
+ fw_file_name = FW_FNAME;
+ exp_major = FW_VERSION_MAJOR;
+ break;
+ case CHELSIO_T5:
+ fw_file_name = FW5_FNAME;
+ exp_major = FW_VERSION_MAJOR_T5;
+ break;
+ default:
+ dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
+ return -EINVAL;
+ }
+
+ ret = request_firmware(&fw, fw_file_name, dev);
if (ret < 0) {
- dev_err(dev, "unable to load firmware image " FW_FNAME
- ", error %d\n", ret);
+ dev_err(dev, "unable to load firmware image %s, error %d\n",
+ fw_file_name, ret);
return ret;
}
hdr = (const struct fw_hdr *)fw->data;
vers = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
+ if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
ret = -EINVAL; /* wrong major version, won't do */
goto out;
}
@@ -1024,18 +1083,15 @@ static int upgrade_fw(struct adapter *adap)
/*
* If the flash FW is unusable or we found something newer, load it.
*/
- if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
+ if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
vers > adap->params.fw_vers) {
dev_info(dev, "upgrading firmware ...\n");
ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
/*force=*/false);
if (!ret)
- dev_info(dev, "firmware successfully upgraded to "
- FW_FNAME " (%d.%d.%d.%d)\n",
- FW_HDR_FW_VER_MAJOR_GET(vers),
- FW_HDR_FW_VER_MINOR_GET(vers),
- FW_HDR_FW_VER_MICRO_GET(vers),
- FW_HDR_FW_VER_BUILD_GET(vers));
+ dev_info(dev,
+ "firmware upgraded to version %pI4 from %s\n",
+ &hdr->fw_ver, fw_file_name);
else
dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
} else {
@@ -1308,6 +1364,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
"VLANinsertions ",
"GROpackets ",
"GROmerged ",
+ "WriteCoalSuccess ",
+ "WriteCoalFail ",
};
static int get_sset_count(struct net_device *dev, int sset)
@@ -1321,10 +1379,15 @@ static int get_sset_count(struct net_device *dev, int sset)
}
#define T4_REGMAP_SIZE (160 * 1024)
+#define T5_REGMAP_SIZE (332 * 1024)
static int get_regs_len(struct net_device *dev)
{
- return T4_REGMAP_SIZE;
+ struct adapter *adap = netdev2adap(dev);
+ if (is_t4(adap->chip))
+ return T4_REGMAP_SIZE;
+ else
+ return T5_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
@@ -1398,11 +1461,25 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ u32 val1, val2;
t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+ data += sizeof(struct queue_port_stats) / sizeof(u64);
+ if (!is_t4(adapter->chip)) {
+ t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
+ val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
+ val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
+ *data = val1 - val2;
+ data++;
+ *data = val2;
+ data++;
+ } else {
+ memset(data, 0, 2 * sizeof(u64));
+		data += 2;
+ }
}
/*
@@ -1413,7 +1490,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
*/
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
- return 4 | (ap->params.rev << 10) | (1 << 16);
+ return CHELSIO_CHIP_VERSION(ap->chip) |
+ (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
@@ -1428,7 +1506,7 @@ static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
- static const unsigned int reg_ranges[] = {
+ static const unsigned int t4_reg_ranges[] = {
0x1008, 0x1108,
0x1180, 0x11b4,
0x11fc, 0x123c,
@@ -1648,13 +1726,452 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x27e00, 0x27e04
};
+ static const unsigned int t5_reg_ranges[] = {
+ 0x1008, 0x1148,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1280, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3028,
+ 0x3060, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x58bc,
+ 0x5940, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a9c,
+ 0x5b9c, 0x5bfc,
+ 0x6000, 0x6040,
+ 0x6058, 0x614c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x78fc,
+ 0x7b00, 0x7c54,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e84,
+ 0x8ea0, 0x8f84,
+ 0x8fc0, 0x90f8,
+ 0x9400, 0x9470,
+ 0x9600, 0x96f4,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0x11088,
+ 0x1109c, 0x1117c,
+ 0x11190, 0x11204,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c60,
+ 0x19c94, 0x19e10,
+ 0x19e50, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fe4,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30100, 0x30144,
+ 0x30190, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x30834,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309ac,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30c24,
+ 0x30d08, 0x30d14,
+ 0x30d1c, 0x30d20,
+ 0x30d3c, 0x30d50,
+ 0x31200, 0x3120c,
+ 0x31220, 0x31220,
+ 0x31240, 0x31240,
+ 0x31600, 0x31600,
+ 0x31608, 0x3160c,
+ 0x31a00, 0x31a1c,
+ 0x31e04, 0x31e20,
+ 0x31e38, 0x31e3c,
+ 0x31e80, 0x31e80,
+ 0x31e88, 0x31ea8,
+ 0x31eb0, 0x31eb4,
+ 0x31ec8, 0x31ed4,
+ 0x31fb8, 0x32004,
+ 0x32208, 0x3223c,
+ 0x32600, 0x32630,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b70,
+ 0x33000, 0x33048,
+ 0x33060, 0x3309c,
+ 0x330f0, 0x33148,
+ 0x33160, 0x3319c,
+ 0x331f0, 0x332e4,
+ 0x332f8, 0x333e4,
+ 0x333f8, 0x33448,
+ 0x33460, 0x3349c,
+ 0x334f0, 0x33548,
+ 0x33560, 0x3359c,
+ 0x335f0, 0x336e4,
+ 0x336f8, 0x337e4,
+ 0x337f8, 0x337fc,
+ 0x33814, 0x33814,
+ 0x3382c, 0x3382c,
+ 0x33880, 0x3388c,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x33948,
+ 0x33960, 0x3399c,
+ 0x339f0, 0x33ae4,
+ 0x33af8, 0x33b10,
+ 0x33b28, 0x33b28,
+ 0x33b3c, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c28, 0x33c28,
+ 0x33c3c, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34100, 0x34144,
+ 0x34190, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x34834,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349ac,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34c24,
+ 0x34d08, 0x34d14,
+ 0x34d1c, 0x34d20,
+ 0x34d3c, 0x34d50,
+ 0x35200, 0x3520c,
+ 0x35220, 0x35220,
+ 0x35240, 0x35240,
+ 0x35600, 0x35600,
+ 0x35608, 0x3560c,
+ 0x35a00, 0x35a1c,
+ 0x35e04, 0x35e20,
+ 0x35e38, 0x35e3c,
+ 0x35e80, 0x35e80,
+ 0x35e88, 0x35ea8,
+ 0x35eb0, 0x35eb4,
+ 0x35ec8, 0x35ed4,
+ 0x35fb8, 0x36004,
+ 0x36208, 0x3623c,
+ 0x36600, 0x36630,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b70,
+ 0x37000, 0x37048,
+ 0x37060, 0x3709c,
+ 0x370f0, 0x37148,
+ 0x37160, 0x3719c,
+ 0x371f0, 0x372e4,
+ 0x372f8, 0x373e4,
+ 0x373f8, 0x37448,
+ 0x37460, 0x3749c,
+ 0x374f0, 0x37548,
+ 0x37560, 0x3759c,
+ 0x375f0, 0x376e4,
+ 0x376f8, 0x377e4,
+ 0x377f8, 0x377fc,
+ 0x37814, 0x37814,
+ 0x3782c, 0x3782c,
+ 0x37880, 0x3788c,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x37948,
+ 0x37960, 0x3799c,
+ 0x379f0, 0x37ae4,
+ 0x37af8, 0x37b10,
+ 0x37b28, 0x37b28,
+ 0x37b3c, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c28, 0x37c28,
+ 0x37c3c, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x38000, 0x38030,
+ 0x38100, 0x38144,
+ 0x38190, 0x381d0,
+ 0x38200, 0x38318,
+ 0x38400, 0x3852c,
+ 0x38540, 0x3861c,
+ 0x38800, 0x38834,
+ 0x388c0, 0x38908,
+ 0x38910, 0x389ac,
+ 0x38a00, 0x38a04,
+ 0x38a0c, 0x38a2c,
+ 0x38a44, 0x38a50,
+ 0x38a74, 0x38c24,
+ 0x38d08, 0x38d14,
+ 0x38d1c, 0x38d20,
+ 0x38d3c, 0x38d50,
+ 0x39200, 0x3920c,
+ 0x39220, 0x39220,
+ 0x39240, 0x39240,
+ 0x39600, 0x39600,
+ 0x39608, 0x3960c,
+ 0x39a00, 0x39a1c,
+ 0x39e04, 0x39e20,
+ 0x39e38, 0x39e3c,
+ 0x39e80, 0x39e80,
+ 0x39e88, 0x39ea8,
+ 0x39eb0, 0x39eb4,
+ 0x39ec8, 0x39ed4,
+ 0x39fb8, 0x3a004,
+ 0x3a208, 0x3a23c,
+ 0x3a600, 0x3a630,
+ 0x3aa00, 0x3aabc,
+ 0x3ab00, 0x3ab70,
+ 0x3b000, 0x3b048,
+ 0x3b060, 0x3b09c,
+ 0x3b0f0, 0x3b148,
+ 0x3b160, 0x3b19c,
+ 0x3b1f0, 0x3b2e4,
+ 0x3b2f8, 0x3b3e4,
+ 0x3b3f8, 0x3b448,
+ 0x3b460, 0x3b49c,
+ 0x3b4f0, 0x3b548,
+ 0x3b560, 0x3b59c,
+ 0x3b5f0, 0x3b6e4,
+ 0x3b6f8, 0x3b7e4,
+ 0x3b7f8, 0x3b7fc,
+ 0x3b814, 0x3b814,
+ 0x3b82c, 0x3b82c,
+ 0x3b880, 0x3b88c,
+ 0x3b8e8, 0x3b8ec,
+ 0x3b900, 0x3b948,
+ 0x3b960, 0x3b99c,
+ 0x3b9f0, 0x3bae4,
+ 0x3baf8, 0x3bb10,
+ 0x3bb28, 0x3bb28,
+ 0x3bb3c, 0x3bb50,
+ 0x3bbf0, 0x3bc10,
+ 0x3bc28, 0x3bc28,
+ 0x3bc3c, 0x3bc50,
+ 0x3bcf0, 0x3bcfc,
+ 0x3c000, 0x3c030,
+ 0x3c100, 0x3c144,
+ 0x3c190, 0x3c1d0,
+ 0x3c200, 0x3c318,
+ 0x3c400, 0x3c52c,
+ 0x3c540, 0x3c61c,
+ 0x3c800, 0x3c834,
+ 0x3c8c0, 0x3c908,
+ 0x3c910, 0x3c9ac,
+ 0x3ca00, 0x3ca04,
+ 0x3ca0c, 0x3ca2c,
+ 0x3ca44, 0x3ca50,
+ 0x3ca74, 0x3cc24,
+ 0x3cd08, 0x3cd14,
+ 0x3cd1c, 0x3cd20,
+ 0x3cd3c, 0x3cd50,
+ 0x3d200, 0x3d20c,
+ 0x3d220, 0x3d220,
+ 0x3d240, 0x3d240,
+ 0x3d600, 0x3d600,
+ 0x3d608, 0x3d60c,
+ 0x3da00, 0x3da1c,
+ 0x3de04, 0x3de20,
+ 0x3de38, 0x3de3c,
+ 0x3de80, 0x3de80,
+ 0x3de88, 0x3dea8,
+ 0x3deb0, 0x3deb4,
+ 0x3dec8, 0x3ded4,
+ 0x3dfb8, 0x3e004,
+ 0x3e208, 0x3e23c,
+ 0x3e600, 0x3e630,
+ 0x3ea00, 0x3eabc,
+ 0x3eb00, 0x3eb70,
+ 0x3f000, 0x3f048,
+ 0x3f060, 0x3f09c,
+ 0x3f0f0, 0x3f148,
+ 0x3f160, 0x3f19c,
+ 0x3f1f0, 0x3f2e4,
+ 0x3f2f8, 0x3f3e4,
+ 0x3f3f8, 0x3f448,
+ 0x3f460, 0x3f49c,
+ 0x3f4f0, 0x3f548,
+ 0x3f560, 0x3f59c,
+ 0x3f5f0, 0x3f6e4,
+ 0x3f6f8, 0x3f7e4,
+ 0x3f7f8, 0x3f7fc,
+ 0x3f814, 0x3f814,
+ 0x3f82c, 0x3f82c,
+ 0x3f880, 0x3f88c,
+ 0x3f8e8, 0x3f8ec,
+ 0x3f900, 0x3f948,
+ 0x3f960, 0x3f99c,
+ 0x3f9f0, 0x3fae4,
+ 0x3faf8, 0x3fb10,
+ 0x3fb28, 0x3fb28,
+ 0x3fb3c, 0x3fb50,
+ 0x3fbf0, 0x3fc10,
+ 0x3fc28, 0x3fc28,
+ 0x3fc3c, 0x3fc50,
+ 0x3fcf0, 0x3fcfc,
+ 0x40000, 0x4000c,
+ 0x40040, 0x40068,
+ 0x40080, 0x40144,
+ 0x40180, 0x4018c,
+ 0x40200, 0x40298,
+ 0x402ac, 0x4033c,
+ 0x403f8, 0x403fc,
+ 0x41300, 0x413c4,
+ 0x41400, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x44078,
+ 0x440c0, 0x44278,
+ 0x442c0, 0x44478,
+ 0x444c0, 0x44678,
+ 0x446c0, 0x44878,
+ 0x448c0, 0x449fc,
+ 0x45000, 0x45068,
+ 0x45080, 0x45084,
+ 0x450a0, 0x450b0,
+ 0x45200, 0x45268,
+ 0x45280, 0x45284,
+ 0x452a0, 0x452b0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x48000, 0x4800c,
+ 0x48040, 0x48068,
+ 0x48080, 0x48144,
+ 0x48180, 0x4818c,
+ 0x48200, 0x48298,
+ 0x482ac, 0x4833c,
+ 0x483f8, 0x483fc,
+ 0x49300, 0x493c4,
+ 0x49400, 0x4941c,
+ 0x49480, 0x494d0,
+ 0x4c000, 0x4c078,
+ 0x4c0c0, 0x4c278,
+ 0x4c2c0, 0x4c478,
+ 0x4c4c0, 0x4c678,
+ 0x4c6c0, 0x4c878,
+ 0x4c8c0, 0x4c9fc,
+ 0x4d000, 0x4d068,
+ 0x4d080, 0x4d084,
+ 0x4d0a0, 0x4d0b0,
+ 0x4d200, 0x4d268,
+ 0x4d280, 0x4d284,
+ 0x4d2a0, 0x4d2b0,
+ 0x4e0c0, 0x4e0e4,
+ 0x4f000, 0x4f08c,
+ 0x4f200, 0x4f250,
+ 0x4f400, 0x4f420,
+ 0x4f600, 0x4f618,
+ 0x4f800, 0x4f814,
+ 0x50000, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x5101c,
+ 0x51300, 0x51308,
+ };
+
int i;
struct adapter *ap = netdev2adap(dev);
+ static const unsigned int *reg_ranges;
+ int arr_size = 0, buf_size = 0;
+
+ if (is_t4(ap->chip)) {
+ reg_ranges = &t4_reg_ranges[0];
+ arr_size = ARRAY_SIZE(t4_reg_ranges);
+ buf_size = T4_REGMAP_SIZE;
+ } else {
+ reg_ranges = &t5_reg_ranges[0];
+ arr_size = ARRAY_SIZE(t5_reg_ranges);
+ buf_size = T5_REGMAP_SIZE;
+ }
regs->version = mk_adap_vers(ap);
- memset(buf, 0, T4_REGMAP_SIZE);
- for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
+ memset(buf, 0, buf_size);
+ for (i = 0; i < arr_size; i += 2)
reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
@@ -2363,8 +2880,8 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
int ret, ofst;
__be32 data[16];
- if (mem == MEM_MC)
- ret = t4_mc_read(adap, pos, data, NULL);
+ if ((mem == MEM_MC) || (mem == MEM_MC1))
+ ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
else
ret = t4_edc_read(adap, mem, pos, data, NULL);
if (ret)
@@ -2405,18 +2922,37 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
static int setup_debugfs(struct adapter *adap)
{
int i;
+ u32 size;
if (IS_ERR_OR_NULL(adap->debugfs_root))
return -1;
i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
- if (i & EDRAM0_ENABLE)
- add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
- if (i & EDRAM1_ENABLE)
- add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
- if (i & EXT_MEM_ENABLE)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
+ if (i & EDRAM0_ENABLE) {
+ size = t4_read_reg(adap, MA_EDRAM0_BAR);
+ add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
+ }
+ if (i & EDRAM1_ENABLE) {
+ size = t4_read_reg(adap, MA_EDRAM1_BAR);
+ add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
+ }
+ if (is_t4(adap->chip)) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
+ if (i & EXT_MEM_ENABLE)
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(size));
+ } else {
+ if (i & EXT_MEM_ENABLE) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
+ add_debugfs_mem(adap, "mc0", MEM_MC0,
+ EXT_MEM_SIZE_GET(size));
+ }
+ if (i & EXT_MEM1_ENABLE) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
+ add_debugfs_mem(adap, "mc1", MEM_MC1,
+ EXT_MEM_SIZE_GET(size));
+ }
+ }
if (adap->l2t)
debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
&t4_l2t_fops);
@@ -2747,10 +3283,18 @@ EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
struct adapter *adap = netdev2adap(dev);
- u32 v;
+ u32 v1, v2, lp_count, hp_count;
- v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
+ v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ if (is_t4(adap->chip)) {
+ lp_count = G_LP_COUNT(v1);
+ hp_count = G_HP_COUNT(v1);
+ } else {
+ lp_count = G_LP_COUNT_T5(v1);
+ hp_count = G_HP_COUNT_T5(v2);
+ }
+ return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
@@ -2853,6 +3397,25 @@ out:
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
+void cxgb4_disable_db_coalescing(struct net_device *dev)
+{
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
+ F_NOCOALESCE);
+}
+EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
+
+void cxgb4_enable_db_coalescing(struct net_device *dev)
+{
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
+}
+EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
+
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
@@ -2888,14 +3451,23 @@ static struct notifier_block cxgb4_netevent_nb = {
static void drain_db_fifo(struct adapter *adap, int usecs)
{
- u32 v;
+ u32 v1, v2, lp_count, hp_count;
do {
+ v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ if (is_t4(adap->chip)) {
+ lp_count = G_LP_COUNT(v1);
+ hp_count = G_HP_COUNT(v1);
+ } else {
+ lp_count = G_LP_COUNT_T5(v1);
+ hp_count = G_HP_COUNT_T5(v2);
+ }
+
+ if (lp_count == 0 && hp_count == 0)
+ break;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(usecs));
- v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
- break;
} while (1);
}
@@ -3004,24 +3576,62 @@ static void process_db_drop(struct work_struct *work)
adap = container_of(work, struct adapter, db_drop_task);
+ if (is_t4(adap->chip)) {
+ disable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
+ drain_db_fifo(adap, 1);
+ recover_all_queues(adap);
+ enable_dbs(adap);
+ } else {
+ u32 dropped_db = t4_read_reg(adap, 0x010ac);
+ u16 qid = (dropped_db >> 15) & 0x1ffff;
+ u16 pidx_inc = dropped_db & 0x1fff;
+ unsigned int s_qpp;
+ unsigned short udb_density;
+ unsigned long qpshift;
+ int page;
+ u32 udb;
+
+ dev_warn(adap->pdev_dev,
+ "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
+ dropped_db, qid,
+ (dropped_db >> 14) & 1,
+ (dropped_db >> 13) & 1,
+ pidx_inc);
+
+ drain_db_fifo(adap, 1);
+
+ s_qpp = QUEUESPERPAGEPF1 * adap->fn;
+ udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+ qpshift = PAGE_SHIFT - ilog2(udb_density);
+ udb = qid << qpshift;
+ udb &= PAGE_MASK;
+ page = udb / PAGE_SIZE;
+ udb += (qid - (page * udb_density)) * 128;
+
+ writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
+
+ /* Re-enable BAR2 WC */
+ t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
+ }
+
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
- disable_dbs(adap);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
- drain_db_fifo(adap, 1);
- recover_all_queues(adap);
- enable_dbs(adap);
}
void t4_db_full(struct adapter *adap)
{
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
- queue_work(workq, &adap->db_full_task);
+ if (is_t4(adap->chip)) {
+ t4_set_reg_field(adap, SGE_INT_ENABLE3,
+ DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
+ queue_work(workq, &adap->db_full_task);
+ }
}
void t4_db_dropped(struct adapter *adap)
{
- queue_work(workq, &adap->db_drop_task);
+ if (is_t4(adap->chip))
+ queue_work(workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -3566,17 +4176,27 @@ void t4_fatal_err(struct adapter *adap)
static void setup_memwin(struct adapter *adap)
{
- u32 bar0;
+ u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
+ if (is_t4(adap->chip)) {
+ mem_win0_base = bar0 + MEMWIN0_BASE;
+ mem_win1_base = bar0 + MEMWIN1_BASE;
+ mem_win2_base = bar0 + MEMWIN2_BASE;
+ } else {
+		/* For T5, only the relative offset inside the PCIe BAR is passed */
+ mem_win0_base = MEMWIN0_BASE;
+ mem_win1_base = MEMWIN1_BASE_T5;
+ mem_win2_base = MEMWIN2_BASE_T5;
+ }
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
- (bar0 + MEMWIN0_BASE) | BIR(0) |
+ mem_win0_base | BIR(0) |
WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
- (bar0 + MEMWIN1_BASE) | BIR(0) |
+ mem_win1_base | BIR(0) |
WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
- (bar0 + MEMWIN2_BASE) | BIR(0) |
+ mem_win2_base | BIR(0) |
WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
@@ -3745,6 +4365,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
unsigned long mtype = 0, maddr = 0;
u32 finiver, finicsum, cfcsum;
int ret, using_flash;
+ char *fw_config_file, fw_config_file_path[256];
/*
* Reset device if necessary.
@@ -3761,7 +4382,21 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* then use that. Otherwise, use the configuration file stored
* in the adapter flash ...
*/
- ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
+ switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ case CHELSIO_T4:
+ fw_config_file = FW_CFNAME;
+ break;
+ case CHELSIO_T5:
+ fw_config_file = FW5_CFNAME;
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+ adapter->pdev->device);
+ ret = -EINVAL;
+ goto bye;
+ }
+
+ ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
if (ret < 0) {
using_flash = 1;
mtype = FW_MEMTYPE_CF_FLASH;
@@ -3877,6 +4512,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
+ sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
/*
* Return successfully and note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
@@ -3887,7 +4523,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
"Configuration File %s, version %#x, computed checksum %#x\n",
(using_flash
? "in device FLASH"
- : "/lib/firmware/" FW_CFNAME),
+ : fw_config_file_path),
finiver, cfcsum);
return 0;
@@ -4814,7 +5450,8 @@ static void print_port_info(const struct net_device *dev)
sprintf(bufp, "BASE-%s", base[pi->port_type]);
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
- adap->params.vpd.id, adap->params.rev, buf,
+ adap->params.vpd.id,
+ CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
@@ -4854,10 +5491,11 @@ static void free_some_resources(struct adapter *adapter)
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int func, i, err;
+ int func, i, err, s_qpp, qpp, num_seg;
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
@@ -4934,7 +5572,34 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_prep_adapter(adapter);
if (err)
- goto out_unmap_bar;
+ goto out_unmap_bar0;
+
+ if (!is_t4(adapter->chip)) {
+ s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
+ qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+ num_seg = PAGE_SIZE / SEGMENT_SIZE;
+
+		/* Each segment is 128 bytes. Write coalescing is enabled
+		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register
+		 * value for the queue does not exceed the number of
+		 * segments that can be accommodated in a page.
+		 */
+ if (qpp > num_seg) {
+ dev_err(&pdev->dev,
+ "Incorrect number of egress queues per page\n");
+ err = -EINVAL;
+ goto out_unmap_bar0;
+ }
+ adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!adapter->bar2) {
+ dev_err(&pdev->dev, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ goto out_unmap_bar0;
+ }
+ }
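
With the common 4 KiB page size, the check above resolves as follows (illustrative arithmetic):

	/* num_seg = PAGE_SIZE / SEGMENT_SIZE = 4096 / 128 = 32, so the probe
	 * proceeds only while qpp <= 32; a larger queues-per-page setting
	 * fails with -EINVAL before BAR2 is mapped.
	 */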
+
setup_memwin(adapter);
err = adap_init0(adapter);
setup_memwin_rdma(adapter);
@@ -5063,6 +5728,9 @@ sriov:
out_free_dev:
free_some_resources(adapter);
out_unmap_bar:
+ if (!is_t4(adapter->chip))
+ iounmap(adapter->bar2);
+ out_unmap_bar0:
iounmap(adapter->regs);
out_free_adapter:
kfree(adapter);
@@ -5113,6 +5781,8 @@ static void remove_one(struct pci_dev *pdev)
free_some_resources(adapter);
iounmap(adapter->regs);
+ if (!is_t4(adapter->chip))
+ iounmap(adapter->bar2);
kfree(adapter);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e2bbc7f3e2d..4faf4d067ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -269,4 +269,7 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
int cxgb4_flush_eq_cache(struct net_device *dev);
+void cxgb4_disable_db_coalescing(struct net_device *dev);
+void cxgb4_enable_db_coalescing(struct net_device *dev);
+
#endif /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fe9a2ea3588..8b47b253e20 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -506,10 +506,14 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
+ u32 val;
if (q->pend_cred >= 8) {
+ val = PIDX(q->pend_cred / 8);
+ if (!is_t4(adap->chip))
+ val |= DBTYPE(1);
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
- QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
+ QID(q->cntxt_id) | val);
q->pend_cred &= 7;
}
}
@@ -812,6 +816,22 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*end = 0;
}
+/* This function copies a 64-byte coalesced work request to the memory-mapped
+ * BAR2 space (user-space writes). For a coalesced WR, the SGE fetches the
+ * data from this FIFO instead of from the host.
+ */
+static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
+{
+ int count = 8;
+
+ while (count) {
+ writeq(*src, dst);
+ src++;
+ dst++;
+ count--;
+ }
+}
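
A note on the two BAR2 offsets used by the T5 path in ring_tx_db() below: offset 8 within a queue's user-doorbell region is written with the PIDX increment, while offset 64 is the write-combined buffer that receives the 64-byte PIO copy of a single WR. These offsets are inferred from this patch itself; a caller-side sketch:

	/* T5 doorbell, assumed offsets from this patch:
	 *   exactly one new WR  -> cxgb_pio_copy(bar2 + q->udb + 64, wr);
	 *   n > 1 descriptors   -> writel(n, bar2 + q->udb + 8);
	 */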
+
/**
* ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
@@ -822,11 +842,25 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
+ unsigned int *wr, index;
+
wmb(); /* write descriptors before telling HW */
spin_lock(&q->db_lock);
if (!q->db_disabled) {
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | PIDX(n));
+ if (is_t4(adap->chip)) {
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(n));
+ } else {
+ if (n == 1) {
+ index = q->pidx ? (q->pidx - 1) : (q->size - 1);
+ wr = (unsigned int *)&q->desc[index];
+ cxgb_pio_copy((u64 __iomem *)
+ (adap->bar2 + q->udb + 64),
+ (u64 *)wr);
+ } else
+ writel(n, adap->bar2 + q->udb + 8);
+ wmb();
+ }
}
q->db_pidx = q->pidx;
spin_unlock(&q->db_lock);
@@ -1555,7 +1589,6 @@ static noinline int handle_trace_pkt(struct adapter *adap,
const struct pkt_gl *gl)
{
struct sk_buff *skb;
- struct cpl_trace_pkt *p;
skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
if (unlikely(!skb)) {
@@ -1563,8 +1596,11 @@ static noinline int handle_trace_pkt(struct adapter *adap,
return 0;
}
- p = (struct cpl_trace_pkt *)skb->data;
- __skb_pull(skb, sizeof(*p));
+ if (is_t4(adap->chip))
+ __skb_pull(skb, sizeof(struct cpl_trace_pkt));
+ else
+ __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
+
skb_reset_mac_header(skb);
skb->protocol = htons(0xffff);
skb->dev = adap->port[0];
@@ -1625,8 +1661,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
struct sge *s = &q->adap->sge;
+ int cpl_trace_pkt = is_t4(q->adap->chip) ?
+ CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
- if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
+ if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
@@ -2143,11 +2181,27 @@ err:
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
+ q->cntxt_id = id;
+ if (!is_t4(adap->chip)) {
+ unsigned int s_qpp;
+ unsigned short udb_density;
+ unsigned long qpshift;
+ int page;
+
+ s_qpp = QUEUESPERPAGEPF1 * adap->fn;
+ udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp));
+ qpshift = PAGE_SHIFT - ilog2(udb_density);
+ q->udb = q->cntxt_id << qpshift;
+ q->udb &= PAGE_MASK;
+ page = q->udb / PAGE_SIZE;
+ q->udb += (q->cntxt_id - (page * udb_density)) * 128;
+ }
+
q->in_use = 0;
q->cidx = q->pidx = 0;
q->stops = q->restarts = 0;
q->stat = (void *)&q->desc[q->size];
- q->cntxt_id = id;
spin_lock_init(&q->db_lock);
adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
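
Working the udb computation above through with illustrative values (PAGE_SIZE = 4096, udb_density = 16, cntxt_id = 130):

	/* qpshift = 12 - ilog2(16) = 8
	 * udb     = 130 << 8      = 0x8200
	 * udb    &= PAGE_MASK     = 0x8000  (page = 8)
	 * udb    += (130 - 8 * 16) * 128 = 0x8000 + 256 = 0x8100
	 * i.e. queue 130 is the third 128-byte doorbell slot on page 8.
	 */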
@@ -2587,11 +2641,20 @@ static int t4_sge_init_hard(struct adapter *adap)
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
- t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
- V_HP_INT_THRESH(M_HP_INT_THRESH) |
- V_LP_INT_THRESH(M_LP_INT_THRESH),
- V_HP_INT_THRESH(dbfifo_int_thresh) |
- V_LP_INT_THRESH(dbfifo_int_thresh));
+ if (is_t4(adap->chip)) {
+ t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+ V_HP_INT_THRESH(M_HP_INT_THRESH) |
+ V_LP_INT_THRESH(M_LP_INT_THRESH),
+ V_HP_INT_THRESH(dbfifo_int_thresh) |
+ V_LP_INT_THRESH(dbfifo_int_thresh));
+ } else {
+ t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+ V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
+ V_LP_INT_THRESH_T5(dbfifo_int_thresh));
+ t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
+ V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
+ V_HP_INT_THRESH_T5(dbfifo_int_thresh));
+ }
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
F_ENABLE_DROP);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8049268ce0f..d02d4e8c441 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -282,6 +282,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
* t4_mc_read - read from MC through backdoor accesses
* @adap: the adapter
+ * @idx: which MC to access
 * @addr: address of first byte requested
* @data: 64 bytes of data containing the requested address
* @ecc: where to store the corresponding 64-bit ECC word
*
@@ -289,22 +290,38 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
* that covers the requested address @addr. If @parity is not %NULL it
* is assigned the 64-bit ECC word for the read data.
*/
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
int i;
+ u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
+ u32 mc_bist_status_rdata, mc_bist_data_pattern;
+
+ if (is_t4(adap->chip)) {
+ mc_bist_cmd = MC_BIST_CMD;
+ mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
+ mc_bist_cmd_len = MC_BIST_CMD_LEN;
+ mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
+ mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
+ } else {
+ mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
+ mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
+ mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
+ mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
+ mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+ }
- if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
+ if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
return -EBUSY;
- t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
- t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
- t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
- t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
+ t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
+ t4_write_reg(adap, mc_bist_cmd_len, 64);
+ t4_write_reg(adap, mc_bist_data_pattern, 0xc);
+ t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
BIST_CMD_GAP(1));
- i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
+ i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
if (i)
return i;
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
for (i = 15; i >= 0; i--)
*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
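
t4_mc_read() now selects per-controller register addresses because T5 has two memory controllers where T4 had one. A minimal sketch (not part of the patch) of how the MC_REG() stride math resolves, using the constants added to t4_regs.h later in this patch:

#include <assert.h>

#define MC_0_BASE_ADDR	0x40000
#define MC_1_BASE_ADDR	0x48000
#define MC_STRIDE	(MC_1_BASE_ADDR - MC_0_BASE_ADDR)	/* 0x8000 */
#define MC_REG(reg, idx)	(reg + MC_STRIDE * idx)
#define MC_P_BIST_CMD	0x41400	/* BIST command register inside the MC0 block */

int main(void)
{
	assert(MC_REG(MC_P_BIST_CMD, 0) == 0x41400);	/* MC0 */
	assert(MC_REG(MC_P_BIST_CMD, 1) == 0x49400);	/* MC1, one stride higher */
	return 0;
}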
@@ -329,20 +346,39 @@ int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
int i;
+ u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
+ u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
+
+ if (is_t4(adap->chip)) {
+ edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
+ edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
+ idx);
+ edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
+ idx);
+ } else {
+ edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
+ edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern =
+ EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
+ edc_bist_status_rdata =
+ EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+ }
- idx *= EDC_STRIDE;
- if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
+ if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
return -EBUSY;
- t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
- t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
- t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
- t4_write_reg(adap, EDC_BIST_CMD + idx,
+ t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
+ t4_write_reg(adap, edc_bist_cmd_len, 64);
+ t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
+ t4_write_reg(adap, edc_bist_cmd,
BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
- i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
+ i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
if (i)
return i;
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
for (i = 15; i >= 0; i--)
*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
@@ -366,6 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
{
int i;
+ u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
/*
* Setup offset into PCIE memory window. Address must be a
@@ -374,7 +411,7 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
* values.)
*/
t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
- addr & ~(MEMWIN0_APERTURE - 1));
+ (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
/* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */
@@ -410,6 +447,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
__be32 *buf, int dir)
{
u32 pos, start, end, offset, memoffset;
+ u32 edc_size, mc_size;
int ret = 0;
__be32 *data;
@@ -423,13 +461,21 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
if (!data)
return -ENOMEM;
- /*
- * Offset into the region of memory which is being accessed
+ /* Offset into the region of memory which is being accessed
* MEM_EDC0 = 0
* MEM_EDC1 = 1
- * MEM_MC = 2
+ * MEM_MC = 2 -- T4
+ * MEM_MC0 = 2 -- For T5
+ * MEM_MC1 = 3 -- For T5
*/
- memoffset = (mtype * (5 * 1024 * 1024));
+ edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1)
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
+ MA_EXT_MEMORY_BAR));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
/* Determine the PCIE_MEM_ACCESS_OFFSET */
addr = addr + memoffset;
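
The memoffset computation above derives each region's base from the EDRAM size register instead of T4's hard-coded 5MB regions. With assumed sizes for illustration only (4MB per EDC, 256MB MC0), the layout stacks up as:

/* Assumed sizes for illustration: edc_size = 4 (MB), mc_size = 256 (MB).
 *
 *   MEM_EDC0 (mtype 0): 0 * 4MB         -> offset   0MB
 *   MEM_EDC1 (mtype 1): 1 * 4MB         -> offset   4MB
 *   MEM_MC0  (mtype 2): 2 * 4MB         -> offset   8MB
 *   MEM_MC1  (mtype 3): 2 * 4MB + 256MB -> offset 264MB
 *
 * MC1 therefore begins exactly where MC0's 256MB window ends.
 */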
@@ -497,9 +543,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
}
#define EEPROM_STAT_ADDR 0x7bfc
-#define VPD_LEN 512
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
+#define VPD_LEN 1024
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@@ -856,6 +902,7 @@ int t4_check_fw_version(struct adapter *adapter)
{
u32 api_vers[2];
int ret, major, minor, micro;
+ int exp_major, exp_minor, exp_micro;
ret = get_fw_version(adapter, &adapter->params.fw_vers);
if (!ret)
@@ -870,17 +917,35 @@ int t4_check_fw_version(struct adapter *adapter)
major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
+
+ switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ case CHELSIO_T4:
+ exp_major = FW_VERSION_MAJOR;
+ exp_minor = FW_VERSION_MINOR;
+ exp_micro = FW_VERSION_MICRO;
+ break;
+ case CHELSIO_T5:
+ exp_major = FW_VERSION_MAJOR_T5;
+ exp_minor = FW_VERSION_MINOR_T5;
+ exp_micro = FW_VERSION_MICRO_T5;
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
+ adapter->chip);
+ return -EINVAL;
+ }
+
memcpy(adapter->params.api_vers, api_vers,
sizeof(adapter->params.api_vers));
- if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ if (major != exp_major) { /* major mismatch - fail */
dev_err(adapter->pdev_dev,
"card FW has major version %u, driver wants %u\n",
- major, FW_VERSION_MAJOR);
+ major, exp_major);
return -EINVAL;
}
- if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ if (minor == exp_minor && micro == exp_micro)
return 0; /* perfect match */
/* Minor/micro version mismatch. Report it but often it's OK. */
@@ -1246,6 +1311,45 @@ static void pcie_intr_handler(struct adapter *adapter)
{ 0 }
};
+ static struct intr_info t5_pcie_intr_info[] = {
+ { MSTGRPPERR, "Master Response Read Queue parity error",
+ -1, 1 },
+ { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ -1, 1 },
+ { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ -1, 1 },
+ { TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR, "PCI DMA channel write request parity error",
+ -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ -1, 1 },
+ { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
+ { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR, "Outbound read error", -1, 0 },
+ { 0 }
+ };
+
int fat;
fat = t4_handle_intr_status(adapter,
@@ -1254,7 +1358,10 @@ static void pcie_intr_handler(struct adapter *adapter)
t4_handle_intr_status(adapter,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
pcie_port_intr_info) +
- t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ is_t4(adapter->chip) ?
+ pcie_intr_info : t5_pcie_intr_info);
+
if (fat)
t4_fatal_err(adapter);
}
@@ -1664,7 +1771,14 @@ static void ncsi_intr_handler(struct adapter *adap)
*/
static void xgmac_intr_handler(struct adapter *adap, int port)
{
- u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ u32 v, int_cause_reg;
+
+ if (is_t4(adap->chip))
+ int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
+ else
+ int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
+
+ v = t4_read_reg(adap, int_cause_reg);
v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
if (!v)
@@ -2126,7 +2240,9 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
u32 bgmap = get_mps_bg_map(adap, idx);
#define GET_STAT(name) \
- t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
+ t4_read_reg64(adap, \
+ (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
+ T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
p->tx_octets = GET_STAT(TX_PORT_BYTES);
@@ -2205,14 +2321,26 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr)
{
+ u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
+
+ if (is_t4(adap->chip)) {
+ mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ } else {
+ mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+ }
+
if (addr) {
- t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
+ t4_write_reg(adap, mag_id_reg_l,
(addr[2] << 24) | (addr[3] << 16) |
(addr[4] << 8) | addr[5]);
- t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
+ t4_write_reg(adap, mag_id_reg_h,
(addr[0] << 8) | addr[1]);
}
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
+ t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
addr ? MAGICEN : 0);
}
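
As a sketch of the register packing above (not part of the patch): a 6-byte MAC address splits across the LO/HI magic-MACID registers, four bytes low and two bytes high. The sample address is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };
	uint32_t lo = (addr[2] << 24) | (addr[3] << 16) |
		      (addr[4] << 8)  |  addr[5];
	uint32_t hi = (addr[0] << 8)  |  addr[1];

	printf("MAGIC_MACID_LO = 0x%08x\n", lo);	/* 0x43123456 */
	printf("MAGIC_MACID_HI = 0x%08x\n", hi);	/* 0x00000007 */
	return 0;
}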
@@ -2235,16 +2363,23 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
int i;
+ u32 port_cfg_reg;
+
+ if (is_t4(adap->chip))
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ else
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
if (!enable) {
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
- PATEN, 0);
+ t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
return 0;
}
if (map > 0xff)
return -EINVAL;
-#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
+#define EPIO_REG(name) \
+ (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
+ T5_PORT_REG(port, MAC_PORT_EPIO_##name))
t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2322,24 +2457,24 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
* @addr: address of first byte requested aligned on 32b.
* @data: len bytes to hold the data read
* @len: amount of data to read from window. Must be <=
- * MEMWIN0_APERATURE after adjusting for 16B alignment
- * requirements of the the memory window.
+ * MEMWIN0_APERTURE after adjusting for 16B for T4 and
+ * 128B for T5 alignment requirements of the memory window.
*
* Read len bytes of data from MC starting at @addr.
*/
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
{
- int i;
- int off;
+ int i, off;
+ u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
- /*
- * Align on a 16B boundary.
+ /* Align on a 2KB boundary.
*/
- off = addr & 15;
+ off = addr & MEMWIN0_APERTURE;
if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
return -EINVAL;
- t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
+ t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
+ (addr & ~MEMWIN0_APERTURE) | win_pf);
t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
for (i = 0; i < len; i += 4)
@@ -3162,6 +3297,9 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
int i, ret;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p;
+ unsigned int max_naddr = is_t4(adap->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
if (naddr > 7)
return -EINVAL;
@@ -3187,8 +3325,8 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
if (idx)
- idx[i] = index >= NEXACT_MAC ? 0xffff : index;
- if (index < NEXACT_MAC)
+ idx[i] = index >= max_naddr ? 0xffff : index;
+ if (index < max_naddr)
ret++;
else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[i]));
@@ -3221,6 +3359,9 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
+ unsigned int max_mac_addr = is_t4(adap->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
@@ -3238,7 +3379,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
- if (ret >= NEXACT_MAC)
+ if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
@@ -3547,7 +3688,8 @@ static int get_flash_params(struct adapter *adap)
*/
int t4_prep_adapter(struct adapter *adapter)
{
- int ret;
+ int ret, ver;
+ uint16_t device_id;
ret = t4_wait_dev_ready(adapter);
if (ret < 0)
@@ -3562,6 +3704,28 @@ int t4_prep_adapter(struct adapter *adapter)
return ret;
}
+ /* Retrieve adapter's device ID
+ */
+ pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
+ ver = device_id >> 12;
+ switch (ver) {
+ case CHELSIO_T4:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4,
+ adapter->params.rev);
+ break;
+ case CHELSIO_T5:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5,
+ adapter->params.rev);
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+ device_id);
+ return -EINVAL;
+ }
+
+ /* Reassign the updated revision field */
+ adapter->params.rev = adapter->chip;
+
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
/*
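
The switch in t4_prep_adapter() keys off the top nibble of the PCI device ID, which encodes the chip generation. A tiny sketch (not part of the patch), using IDs from the device table added later in this series:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint16_t t4_id = 0x480a;	/* T404-bt, from the ID table below */
	uint16_t t5_id = 0x580a;	/* T504-bt */

	assert((t4_id >> 12) == 0x4);	/* CHELSIO_T4 */
	assert((t5_id >> 12) == 0x5);	/* CHELSIO_T5 */
	return 0;
}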
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index f534ed7e10e..1d1623be9f1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,7 +47,6 @@ enum {
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
- NEXACT_MAC = 336, /* # of exact MAC address filters */
L2T_SIZE = 4096, /* # of L2T entries */
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 261d17703ad..47656ac1ac2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -74,6 +74,7 @@ enum {
CPL_PASS_ESTABLISH = 0x41,
CPL_RX_DATA_DDP = 0x42,
CPL_PASS_ACCEPT_REQ = 0x44,
+ CPL_TRACE_PKT_T5 = 0x48,
CPL_RDMA_READ_REQ = 0x60,
@@ -287,6 +288,23 @@ struct cpl_act_open_req {
__be32 opt2;
};
+#define S_FILTER_TUPLE 24
+#define M_FILTER_TUPLE 0xFFFFFFFFFF
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
+struct cpl_t5_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+};
+
struct cpl_act_open_req6 {
WR_HDR;
union opcode_tid ot;
@@ -566,6 +584,11 @@ struct cpl_rx_pkt {
#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
+#define S_RX_T5_ETHHDR_LEN 0
+#define M_RX_T5_ETHHDR_LEN 0x3F
+#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
+#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
+
#define S_RX_MACIDX 8
#define M_RX_MACIDX 0x1FF
#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
@@ -612,6 +635,28 @@ struct cpl_trace_pkt {
__be64 tstamp;
};
+struct cpl_t5_trace_pkt {
+ __u8 opcode;
+ __u8 intf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 runt:4;
+ __u8 filter_hit:4;
+ __u8:6;
+ __u8 err:1;
+ __u8 trunc:1;
+#else
+ __u8 filter_hit:4;
+ __u8 runt:4;
+ __u8 trunc:1;
+ __u8 err:1;
+ __u8:6;
+#endif
+ __be16 rsvd;
+ __be16 len;
+ __be64 tstamp;
+ __be64 rsvd1;
+};
+
struct cpl_l2t_write_req {
WR_HDR;
union opcode_tid ot;
@@ -742,4 +787,12 @@ struct ulp_mem_io {
#define ULP_MEMIO_LOCK(x) ((x) << 31)
};
+#define S_T5_ULP_MEMIO_IMM 23
+#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
+#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
+
+#define S_T5_ULP_MEMIO_ORDER 22
+#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
+#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
+
#endif /* __T4_MSG_H */
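
The FILTER_TUPLE macros added above follow Chelsio's S_/M_/V_/G_ quartet convention: shift, field mask, pack a value, extract it back. A self-contained sketch (not part of the patch; the ULL suffix and sample tuple are added here for a portable user-space build):

#include <stdint.h>
#include <assert.h>

#define S_FILTER_TUPLE 24
#define M_FILTER_TUPLE 0xFFFFFFFFFFULL	/* 40-bit field */
#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)

int main(void)
{
	uint64_t tuple = 0x123456789AULL;	/* hypothetical 40-bit tuple */
	uint64_t params = V_FILTER_TUPLE(tuple);

	assert(G_FILTER_TUPLE(params) == tuple);	/* round-trips cleanly */
	return 0;
}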
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 83ec5f7844a..ef146c0ba48 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -68,9 +68,14 @@
#define QID_SHIFT 15
#define QID(x) ((x) << QID_SHIFT)
#define DBPRIO(x) ((x) << 14)
+#define DBTYPE(x) ((x) << 13)
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)
+#define S_PIDX_T5 0
+#define M_PIDX_T5 0x1fffU
+#define PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5)
+
#define SGE_PF_GTS 0x4
#define INGRESSQID_MASK 0xffff0000U
@@ -152,6 +157,8 @@
#define QUEUESPERPAGEPF0_MASK 0x0000000fU
#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+#define QUEUESPERPAGEPF1 4
+
#define SGE_INT_CAUSE1 0x1024
#define SGE_INT_CAUSE2 0x1030
#define SGE_INT_CAUSE3 0x103c
@@ -234,6 +241,10 @@
#define SGE_DOORBELL_CONTROL 0x10a8
#define ENABLE_DROP (1 << 13)
+#define S_NOCOALESCE 26
+#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
+#define F_NOCOALESCE V_NOCOALESCE(1U)
+
#define SGE_TIMER_VALUE_0_AND_1 0x10b8
#define TIMERVALUE0_MASK 0xffff0000U
#define TIMERVALUE0_SHIFT 16
@@ -272,17 +283,36 @@
#define S_HP_INT_THRESH 28
#define M_HP_INT_THRESH 0xfU
#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
+#define S_LP_INT_THRESH_T5 18
+#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
+#define M_LP_COUNT_T5 0x3ffffU
+#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
#define M_HP_COUNT 0x7ffU
#define S_HP_COUNT 16
#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
#define S_LP_INT_THRESH 12
#define M_LP_INT_THRESH 0xfU
+#define M_LP_INT_THRESH_T5 0xfffU
#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
#define M_LP_COUNT 0x7ffU
#define S_LP_COUNT 0
#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
#define A_SGE_DBFIFO_STATUS 0x10a4
+#define SGE_STAT_TOTAL 0x10e4
+#define SGE_STAT_MATCH 0x10e8
+
+#define SGE_STAT_CFG 0x10ec
+#define S_STATSOURCE_T5 9
+#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
+
+#define SGE_DBFIFO_STATUS2 0x1118
+#define M_HP_COUNT_T5 0x3ffU
+#define G_HP_COUNT_T5(x) ((x) & M_HP_COUNT_T5)
+#define S_HP_INT_THRESH_T5 10
+#define M_HP_INT_THRESH_T5 0xfU
+#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
+
#define S_ENABLE_DROP 13
#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
#define F_ENABLE_DROP V_ENABLE_DROP(1U)
@@ -331,8 +361,27 @@
#define MSIADDRHPERR 0x00000002U
#define MSIADDRLPERR 0x00000001U
+#define READRSPERR 0x20000000U
+#define TRGT1GRPPERR 0x10000000U
+#define IPSOTPERR 0x08000000U
+#define IPRXDATAGRPPERR 0x02000000U
+#define IPRXHDRGRPPERR 0x01000000U
+#define MAGRPPERR 0x00400000U
+#define VFIDPERR 0x00200000U
+#define HREQWRPERR 0x00010000U
+#define DREQWRPERR 0x00002000U
+#define MSTTAGQPERR 0x00000400U
+#define PIOREQGRPPERR 0x00000100U
+#define PIOCPLGRPPERR 0x00000080U
+#define MSIXSTIPERR 0x00000004U
+#define MSTTIMEOUTPERR 0x00000002U
+#define MSTGRPPERR 0x00000001U
+
#define PCIE_NONFAT_ERR 0x3010
#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
+#define S_PCIEOFST 10
+#define M_PCIEOFST 0x3fffffU
+#define GET_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
#define PCIEOFST_MASK 0xfffffc00U
#define BIR_MASK 0x00000300U
#define BIR_SHIFT 8
@@ -342,6 +391,9 @@
#define WINDOW(x) ((x) << WINDOW_SHIFT)
#define PCIE_MEM_ACCESS_OFFSET 0x306c
+#define S_PFNUM 0
+#define V_PFNUM(x) ((x) << S_PFNUM)
+
#define PCIE_FW 0x30b8
#define PCIE_FW_ERR 0x80000000U
#define PCIE_FW_INIT 0x40000000U
@@ -407,12 +459,18 @@
#define MC_BIST_STATUS_RDATA 0x7688
+#define MA_EDRAM0_BAR 0x77c0
+#define MA_EDRAM1_BAR 0x77c4
+#define EDRAM_SIZE_MASK 0xfffU
+#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK)
+
#define MA_EXT_MEMORY_BAR 0x77c8
#define EXT_MEM_SIZE_MASK 0x00000fffU
#define EXT_MEM_SIZE_SHIFT 0
#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
#define MA_TARGET_MEM_ENABLE 0x77d8
+#define EXT_MEM1_ENABLE 0x00000010U
#define EXT_MEM_ENABLE 0x00000004U
#define EDRAM1_ENABLE 0x00000002U
#define EDRAM0_ENABLE 0x00000001U
@@ -431,6 +489,7 @@
#define MA_PCIE_FW 0x30b8
#define MA_PARITY_ERROR_STATUS 0x77f4
+#define MA_EXT_MEMORY1_BAR 0x7808
#define EDC_0_BASE_ADDR 0x7900
#define EDC_BIST_CMD 0x7904
@@ -801,6 +860,15 @@
#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
+#define MAC_PORT_CFG2 0x818
+#define MAC_PORT_MAGIC_MACID_LO 0x824
+#define MAC_PORT_MAGIC_MACID_HI 0x828
+#define MAC_PORT_EPIO_DATA0 0x8c0
+#define MAC_PORT_EPIO_DATA1 0x8c4
+#define MAC_PORT_EPIO_DATA2 0x8c8
+#define MAC_PORT_EPIO_DATA3 0x8cc
+#define MAC_PORT_EPIO_OP 0x8d0
+
#define MPS_CMN_CTL 0x9000
#define NUMPORTS_MASK 0x00000003U
#define NUMPORTS_SHIFT 0
@@ -1063,6 +1131,7 @@
#define ADDRESS_SHIFT 0
#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
+#define MAC_PORT_INT_CAUSE 0x8dc
#define XGMAC_PORT_INT_CAUSE 0x10dc
#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
@@ -1101,4 +1170,33 @@
#define V_PORT(x) ((x) << S_PORT)
#define F_PORT V_PORT(1U)
+#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
+#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
+
+#define T5_PORT0_BASE 0x30000
+#define T5_PORT_STRIDE 0x4000
+#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
+#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
+
+#define MC_0_BASE_ADDR 0x40000
+#define MC_1_BASE_ADDR 0x48000
+#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
+#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
+
+#define MC_P_BIST_CMD 0x41400
+#define MC_P_BIST_CMD_ADDR 0x41404
+#define MC_P_BIST_CMD_LEN 0x41408
+#define MC_P_BIST_DATA_PATTERN 0x4140c
+#define MC_P_BIST_STATUS_RDATA 0x41488
+#define EDC_T50_BASE_ADDR 0x50000
+#define EDC_H_BIST_CMD 0x50004
+#define EDC_H_BIST_CMD_ADDR 0x50008
+#define EDC_H_BIST_CMD_LEN 0x5000c
+#define EDC_H_BIST_DATA_PATTERN 0x50010
+#define EDC_H_BIST_STATUS_RDATA 0x50028
+
+#define EDC_T51_BASE_ADDR 0x50800
+#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+
#endif /* __T4_REGS_H */
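
The T5_PORT_REG() helpers above reflect T5's move of the per-port MAC registers into fixed-stride blocks, one 16KB block per port. A sketch of the address math (not part of the patch):

#include <assert.h>

#define T5_PORT0_BASE	0x30000
#define T5_PORT_STRIDE	0x4000
#define T5_PORT_BASE(idx)	(T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
#define T5_PORT_REG(idx, reg)	(T5_PORT_BASE(idx) + (reg))
#define MAC_PORT_CFG2	0x818	/* per-port offset, from the defines above */

int main(void)
{
	assert(T5_PORT_REG(0, MAC_PORT_CFG2) == 0x30818);
	assert(T5_PORT_REG(2, MAC_PORT_CFG2) == 0x38818);
	return 0;
}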
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a0dcccd846c..93444325b1e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -574,7 +574,7 @@ struct fw_eth_tx_pkt_vm_wr {
__be16 vlantci;
};
-#define FW_CMD_MAX_TIMEOUT 3000
+#define FW_CMD_MAX_TIMEOUT 10000
/*
* If a host driver does a HELLO and discovers that there's already a MASTER
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7..be5c7ef6ca9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -344,6 +344,7 @@ struct adapter {
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
+ enum chip_type chip;
struct adapter_params params;
/* queue and interrupt resources */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 56b46ab2d4c..7fcac200376 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -54,8 +54,8 @@
/*
* Generic information about the driver.
*/
-#define DRV_VERSION "1.0.0"
-#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
+#define DRV_VERSION "2.0.0-ko"
+#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"
/*
* Module Parameters.
@@ -1050,7 +1050,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter)
/*
* Chip version 4, revision 0x3f (cxgb4vf).
*/
- return 4 | (0x3f << 10);
+ return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10);
}
/*
@@ -2099,6 +2099,15 @@ static int adap_init0(struct adapter *adapter)
return err;
}
+ switch (adapter->pdev->device >> 12) {
+ case CHELSIO_T4:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ break;
+ case CHELSIO_T5:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);
+ break;
+ }
+
/*
* Grab basic operational parameters. These will predominantly have
* been set up by the Physical Function Driver or will be hard coded
@@ -2888,6 +2897,26 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x480a, 0), /* T404-bt */
CH_DEVICE(0x480d, 0), /* T480-cr */
CH_DEVICE(0x480e, 0), /* T440-lp-cr */
+ CH_DEVICE(0x5800, 0), /* T580-dbg */
+ CH_DEVICE(0x5801, 0), /* T520-cr */
+ CH_DEVICE(0x5802, 0), /* T522-cr */
+ CH_DEVICE(0x5803, 0), /* T540-cr */
+ CH_DEVICE(0x5804, 0), /* T520-bch */
+ CH_DEVICE(0x5805, 0), /* T540-bch */
+ CH_DEVICE(0x5806, 0), /* T540-ch */
+ CH_DEVICE(0x5807, 0), /* T520-so */
+ CH_DEVICE(0x5808, 0), /* T520-cx */
+ CH_DEVICE(0x5809, 0), /* T520-bt */
+ CH_DEVICE(0x580a, 0), /* T504-bt */
+ CH_DEVICE(0x580b, 0), /* T520-sr */
+ CH_DEVICE(0x580c, 0), /* T504-bt */
+ CH_DEVICE(0x580d, 0), /* T580-cr */
+ CH_DEVICE(0x580e, 0), /* T540-lp-cr */
+ CH_DEVICE(0x580f, 0), /* Amsterdam */
+ CH_DEVICE(0x5810, 0), /* T580-lp-cr */
+ CH_DEVICE(0x5811, 0), /* T520-lp-cr */
+ CH_DEVICE(0x5812, 0), /* T560-cr */
+ CH_DEVICE(0x5813, 0), /* T580-cr */
{ 0, }
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9488032d6d2..61dfb2a4792 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -528,17 +528,21 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
*/
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
+ u32 val;
+
/*
* The SGE keeps track of its Producer and Consumer Indices in terms
* of Egress Queue Units so we can only tell it about integral numbers
* of multiples of Free List Entries per Egress Queue Units ...
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
+ val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
+ if (!is_t4(adapter->chip))
+ val |= DBTYPE(1);
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
DBPRIO(1) |
- QID(fl->cntxt_id) |
- PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
+ QID(fl->cntxt_id) | val);
fl->pend_cred %= FL_PER_EQ_UNIT;
}
}
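
The DBTYPE(1) bit ORed in above tags the doorbell write on T5. Sketched layout of the kdoorbell word per this patch's t4_regs.h definitions (illustrative; QID is shown occupying the remaining high bits):

/*
 *   bits 31..15   bit 14   bit 13   bits 12..0
 *   +-----------+--------+--------+------------+
 *   |    QID    | DBPRIO | DBTYPE |  PIDX (T5) |
 *   +-----------+--------+--------+------------+
 *
 * DBTYPE lives at bit 13 (and the T5 PIDX field is 13 bits wide),
 * which is why DBTYPE(1) is set only when the chip is not a T4.
 */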
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 283f9d0d37f..53cbfed21d0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -38,6 +38,25 @@
#include "../cxgb4/t4fw_api.h"
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A3,
+
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_FIRST_REV = T5_A1,
+ T5_LAST_REV = T5_A1,
+};
+
/*
* The "len16" field of a Firmware Command Structure ...
*/
@@ -232,6 +251,11 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
}
+static inline int is_t4(enum chip_type chip)
+{
+ return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+}
+
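
The chip-code helpers added above pack generation and revision into one byte, so range comparisons like is_t4() work on raw codes. A sketch (not part of the patch):

#include <assert.h>

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

int main(void)
{
	int t4_a2 = CHELSIO_CHIP_CODE(0x4, 1);	/* T4 rev 1 -> 0x41 */

	assert(CHELSIO_CHIP_VERSION(t4_a2) == 0x4);
	assert(CHELSIO_CHIP_RELEASE(t4_a2) == 1);
	return 0;
}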
int t4vf_wait_dev_ready(struct adapter *);
int t4vf_port_init(struct adapter *, int);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 7127c7b9efd..9f96dc3bb11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1027,8 +1027,11 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
+ unsigned int max_naddr = is_t4(adapter->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
- if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
+ if (naddr > max_naddr)
return -EINVAL;
for (offset = 0; offset < naddr; /**/) {
@@ -1069,10 +1072,10 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
if (idx)
idx[offset+i] =
- (index >= FW_CLS_TCAM_NUM_ENTRIES
+ (index >= max_naddr
? 0xffff
: index);
- if (index < FW_CLS_TCAM_NUM_ENTRIES)
+ if (index < max_naddr)
nfilters++;
else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
@@ -1118,6 +1121,9 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
+ unsigned int max_naddr = is_t4(adapter->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
/*
* If this is a new allocation, determine whether it should be
@@ -1140,7 +1146,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
if (ret == 0) {
p = &rpl.u.exact[0];
ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
- if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
+ if (ret >= max_naddr)
ret = -ENOMEM;
}
return ret;
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 13844695778..aaa0499aa19 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -478,9 +478,6 @@ dma_rx(struct net_device *dev)
/* Malloc up new buffer. */
skb = netdev_alloc_skb(dev, length + 2);
if (skb == NULL) {
- /* I don't think we want to do this to a stressed system */
- cs89_dbg(0, err, "%s: Memory squeeze, dropping packet\n",
- dev->name);
dev->stats.rx_dropped++;
/* AKPM: advance bp to the next frame */
@@ -731,9 +728,6 @@ net_rx(struct net_device *dev)
/* Malloc up new buffer. */
skb = netdev_alloc_skb(dev, length + 2);
if (skb == NULL) {
-#if 0 /* Again, this seems a cruel thing to do */
- pr_warn("%s: Memory squeeze, dropping packet\n", dev->name);
-#endif
dev->stats.rx_dropped++;
return;
}
@@ -1978,18 +1972,6 @@ static struct platform_driver cs89x0_driver = {
.remove = cs89x0_platform_remove,
};
-static int __init cs89x0_init(void)
-{
- return platform_driver_probe(&cs89x0_driver, cs89x0_platform_probe);
-}
-
-module_init(cs89x0_init);
-
-static void __exit cs89x0_cleanup(void)
-{
- platform_driver_unregister(&cs89x0_driver);
-}
-
-module_exit(cs89x0_cleanup);
+module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
#endif /* CONFIG_CS89x0_PLATFORM */
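
Several conversions in this patch, like the one above, replace hand-rolled init/exit pairs with the module_platform_driver helpers. Roughly what the macro expands to (approximate sketch only; see include/linux/platform_device.h for the real definition, which goes through module_driver()):

#define module_platform_driver(__driver) \
	static int __init __driver##_init(void) \
	{ \
		return platform_driver_register(&(__driver)); \
	} \
	module_init(__driver##_init); \
	static void __exit __driver##_exit(void) \
	{ \
		platform_driver_unregister(&(__driver)); \
	} \
	module_exit(__driver##_exit);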
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 354cbb78ed5..67b0388b6e6 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -887,18 +887,7 @@ static struct platform_driver ep93xx_eth_driver = {
},
};
-static int __init ep93xx_eth_init_module(void)
-{
- printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n");
- return platform_driver_register(&ep93xx_eth_driver);
-}
-
-static void __exit ep93xx_eth_cleanup_module(void)
-{
- platform_driver_unregister(&ep93xx_eth_driver);
-}
+module_platform_driver(ep93xx_eth_driver);
-module_init(ep93xx_eth_init_module);
-module_exit(ep93xx_eth_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8cdf02503d1..f38f677f842 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1687,22 +1687,7 @@ static struct platform_driver dm9000_driver = {
.remove = dm9000_drv_remove,
};
-static int __init
-dm9000_init(void)
-{
- printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
-
- return platform_driver_register(&dm9000_driver);
-}
-
-static void __exit
-dm9000_cleanup(void)
-{
- platform_driver_unregister(&dm9000_driver);
-}
-
-module_init(dm9000_init);
-module_exit(dm9000_cleanup);
+module_platform_driver(dm9000_driver);
MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 88feced9a62..cdbcd164314 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -236,17 +236,14 @@ static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
private->rx_buffer = dma_alloc_coherent(d, 8192,
&private->rx_dma_handle,
GFP_KERNEL);
- if (private->rx_buffer == NULL) {
- pr_err("%s: no memory for rx buffer\n", __func__);
+ if (private->rx_buffer == NULL)
goto rx_buf_fail;
- }
+
private->tx_buffer = dma_alloc_coherent(d, 8192,
&private->tx_dma_handle,
GFP_KERNEL);
- if (private->tx_buffer == NULL) {
- pr_err("%s: no memory for tx buffer\n", __func__);
+ if (private->tx_buffer == NULL)
goto tx_buf_fail;
- }
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 110d26f4c60..afa8e3af2c4 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -580,12 +580,9 @@ alloc_list (struct net_device *dev)
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
- if (skb == NULL) {
- printk (KERN_ERR
- "%s: alloc_list: allocate Rx buffer error! ",
- dev->name);
+ if (skb == NULL)
break;
- }
+
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
cpu_to_le64 ( pci_map_single (
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 29aff55f2ee..2e2700e3a5a 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 3c9b4f12e3e..f286ad2da1f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -2667,10 +2667,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
&cmd.dma, GFP_KERNEL);
- if (!cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ if (!cmd.va)
return -ENOMEM;
- }
spin_lock_bh(&adapter->mcc_lock);
@@ -3202,6 +3200,31 @@ err:
return status;
}
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_intr_set *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
+ wrb, NULL);
+
+ req->intr_enabled = intr_enable;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 96970860c91..f2af8551721 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -188,6 +188,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PORT_NAME 77
+#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -1791,6 +1792,12 @@ struct be_cmd_enable_disable_vf {
u8 rsvd[3];
};
+struct be_cmd_req_intr_set {
+ struct be_cmd_req_hdr hdr;
+ u8 intr_enabled;
+ u8 rsvd[3];
+};
+
static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
{
return flags & adapter->cmd_privileges ? true : false;
@@ -1938,3 +1945,4 @@ extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
extern int be_cmd_get_if_id(struct be_adapter *adapter,
struct be_vf_cfg *vf_cfg, int vf_num);
extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 76b302f30c8..07b7f27cb0b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -719,10 +719,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
&ddrdma_cmd.dma, GFP_KERNEL);
- if (!ddrdma_cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ if (!ddrdma_cmd.va)
return -ENOMEM;
- }
for (i = 0; i < 2; i++) {
ret = be_cmd_ddr_dma_test(adapter, pattern[i],
@@ -845,11 +843,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
&eeprom_cmd.dma, GFP_KERNEL);
- if (!eeprom_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure. Could not read eeprom\n");
+ if (!eeprom_cmd.va)
return -ENOMEM;
- }
status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 62dc220695f..89e6d8cfaf0 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08e54f3d288..536afa2fb94 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -146,20 +146,16 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->entry_size = entry_size;
mem->size = len * entry_size;
mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!mem->va)
return -ENOMEM;
- memset(mem->va, 0, mem->size);
return 0;
}
-static void be_intr_set(struct be_adapter *adapter, bool enable)
+static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
u32 reg, enabled;
- if (adapter->eeh_error)
- return;
-
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
&reg);
enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
@@ -175,6 +171,22 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
+static void be_intr_set(struct be_adapter *adapter, bool enable)
+{
+ int status = 0;
+
+ /* On lancer interrupts can't be controlled via this register */
+ if (lancer_chip(adapter))
+ return;
+
+ if (adapter->eeh_error)
+ return;
+
+ status = be_cmd_intr_set(adapter, enable);
+ if (status)
+ be_reg_intr_set(adapter, enable);
+}
+
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
u32 val = 0;
@@ -2435,9 +2447,6 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- if (!lancer_chip(adapter))
- be_intr_set(adapter, false);
-
for_all_evt_queues(adapter, eqo, i)
napi_disable(&eqo->napi);
@@ -2525,9 +2534,6 @@ static int be_open(struct net_device *netdev)
be_irq_register(adapter);
- if (!lancer_chip(adapter))
- be_intr_set(adapter, true);
-
for_all_rx_queues(adapter, rxo, i)
be_cq_notify(adapter, rxo->cq.id, true, 0);
@@ -2562,10 +2568,9 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (cmd.va == NULL)
return -1;
- memset(cmd.va, 0, cmd.size);
if (enable) {
status = pci_write_config_dword(adapter->pdev,
@@ -3457,11 +3462,9 @@ static int lancer_fw_download(struct be_adapter *adapter,
flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ LANCER_FW_DOWNLOAD_CHUNK;
flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
+ &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
status = -ENOMEM;
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure while flashing\n");
goto lancer_fw_exit;
}
@@ -3563,8 +3566,6 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
&flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
status = -ENOMEM;
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure while flashing\n");
goto be_fw_exit;
}
@@ -3791,12 +3792,13 @@ static int be_ctrl_init(struct be_adapter *adapter)
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
- &rx_filter->dma, GFP_KERNEL);
+ &rx_filter->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (rx_filter->va == NULL) {
status = -ENOMEM;
goto free_mbox;
}
- memset(rx_filter->va, 0, rx_filter->size);
+
mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
@@ -3838,10 +3840,9 @@ static int be_stats_init(struct be_adapter *adapter)
cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (cmd->va == NULL)
return -1;
- memset(cmd->va, 0, cmd->size);
return 0;
}
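
The allocation change above is part of a recurring cleanup in this patch: let dma_alloc_coherent() zero the buffer via __GFP_ZERO rather than following the allocation with a memset(). The before/after shape, sketched with the be_stats_init() names:

/* before */
cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
			     GFP_KERNEL);
if (cmd->va == NULL)
	return -1;
memset(cmd->va, 0, cmd->size);

/* after */
cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
			     GFP_KERNEL | __GFP_ZERO);
if (cmd->va == NULL)
	return -1;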
@@ -3853,6 +3854,7 @@ static void be_remove(struct pci_dev *pdev)
return;
be_roce_dev_remove(adapter);
+ be_intr_set(adapter, false);
cancel_delayed_work_sync(&adapter->func_recovery_work);
@@ -4142,11 +4144,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
goto ctrl_clean;
}
- /* The INTR bit may be set in the card when probed by a kdump kernel
- * after a crash.
- */
- if (!lancer_chip(adapter))
- be_intr_set(adapter, false);
+ /* Wait for interrupts to quiesce after an FLR */
+ msleep(100);
+
+ /* Allow interrupts for other ULPs running on NIC function */
+ be_intr_set(adapter, true);
status = be_stats_init(adapter);
if (status)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 55d32aa0a09..f3d126dcc10 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index db4ea8081c0..27657299846 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 7c361d1db94..21b85fb7d05 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -780,12 +780,11 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
priv->descs = dma_alloc_coherent(priv->dev,
sizeof(struct ftgmac100_descs),
- &priv->descs_dma_addr, GFP_KERNEL);
+ &priv->descs_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
if (!priv->descs)
return -ENOMEM;
- memset(priv->descs, 0, sizeof(struct ftgmac100_descs));
-
/* initialize RX ring */
ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
@@ -1350,22 +1349,7 @@ static struct platform_driver ftgmac100_driver = {
},
};
-/******************************************************************************
- * initialization / finalization
- *****************************************************************************/
-static int __init ftgmac100_init(void)
-{
- pr_info("Loading version " DRV_VERSION " ...\n");
- return platform_driver_register(&ftgmac100_driver);
-}
-
-static void __exit ftgmac100_exit(void)
-{
- platform_driver_unregister(&ftgmac100_driver);
-}
-
-module_init(ftgmac100_init);
-module_exit(ftgmac100_exit);
+module_platform_driver(ftgmac100_driver);
MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTGMAC100 driver");
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index b5ea8fbd8a7..a6eda8d8313 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -732,13 +732,13 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{
int i;
- priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
- &priv->descs_dma_addr, GFP_KERNEL);
+ priv->descs = dma_alloc_coherent(priv->dev,
+ sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
if (!priv->descs)
return -ENOMEM;
- memset(priv->descs, 0, sizeof(struct ftmac100_descs));
-
/* initialize RX ring */
ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 911d0253dbb..186222ef101 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -29,7 +29,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
@@ -743,8 +742,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
if (unlikely(!skb)) {
- printk("%s: Memory squeeze, dropping packet.\n",
- ndev->name);
ndev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
@@ -1602,11 +1599,9 @@ static int fec_enet_init(struct net_device *ndev)
/* Allocate memory for buffer descriptors. */
cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
- GFP_KERNEL);
- if (!cbd_base) {
- printk("FEC: allocate descriptor memory failed?\n");
+ GFP_KERNEL);
+ if (!cbd_base)
return -ENOMEM;
- }
spin_lock_init(&fep->hw_lock);
@@ -1739,16 +1734,10 @@ fec_probe(struct platform_device *pdev)
if (!r)
return -ENXIO;
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (!r)
- return -EBUSY;
-
/* Init network device */
ndev = alloc_etherdev(sizeof(struct fec_enet_private));
- if (!ndev) {
- ret = -ENOMEM;
- goto failed_alloc_etherdev;
- }
+ if (!ndev)
+ return -ENOMEM;
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1760,7 +1749,7 @@ fec_probe(struct platform_device *pdev)
(pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
- fep->hwp = ioremap(r->start, resource_size(r));
+ fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
fep->pdev = pdev;
fep->dev_id = dev_id++;
@@ -1882,11 +1871,8 @@ failed_regulator:
clk_disable_unprepare(fep->clk_ptp);
failed_pin:
failed_clk:
- iounmap(fep->hwp);
failed_ioremap:
free_netdev(ndev);
-failed_alloc_etherdev:
- release_mem_region(r->start, resource_size(r));
return ret;
}
@@ -1896,7 +1882,6 @@ fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- struct resource *r;
int i;
unregister_netdev(ndev);
@@ -1912,13 +1897,8 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
- iounmap(fep->hwp);
free_netdev(ndev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- BUG_ON(!r);
- release_mem_region(r->start, resource_size(r));
-
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 46df28893c1..edc120094c3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -177,8 +177,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
received++;
netif_receive_skb(skb);
} else {
- dev_warn(fep->dev,
- "Memory squeeze, dropping packet.\n");
fep->stats.rx_dropped++;
skbn = skb;
}
@@ -309,8 +307,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
received++;
netif_rx(skb);
} else {
- dev_warn(fep->dev,
- "Memory squeeze, dropping packet.\n");
fep->stats.rx_dropped++;
skbn = skb;
}
@@ -505,11 +501,9 @@ void fs_init_bds(struct net_device *dev)
*/
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
- if (skb == NULL) {
- dev_warn(fep->dev,
- "Memory squeeze, unable to allocate skb\n");
+ if (skb == NULL)
break;
- }
+
skb_align(skb, ENET_RX_ALIGN);
fep->rx_skbuff[i] = skb;
CBDW_BUFADDR(bdp,
@@ -593,13 +587,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
/* Alloc new skb */
new_skb = netdev_alloc_skb(dev, skb->len + 4);
- if (!new_skb) {
- if (net_ratelimit()) {
- dev_warn(fep->dev,
- "Memory squeeze, dropping tx packet.\n");
- }
+ if (!new_skb)
return NULL;
- }
/* Make sure new skb is properly aligned */
skb_align(new_skb, 4);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d2c5441d1bf..1b468a82a68 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -245,14 +245,13 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
/* Allocate memory for the buffer descriptors */
vaddr = dma_alloc_coherent(dev,
- sizeof(struct txbd8) * priv->total_tx_ring_size +
- sizeof(struct rxbd8) * priv->total_rx_ring_size,
- &addr, GFP_KERNEL);
- if (!vaddr) {
- netif_err(priv, ifup, ndev,
- "Could not allocate buffer descriptors!\n");
+ (priv->total_tx_ring_size *
+ sizeof(struct txbd8)) +
+ (priv->total_rx_ring_size *
+ sizeof(struct rxbd8)),
+ &addr, GFP_KERNEL);
+ if (!vaddr)
return -ENOMEM;
- }
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 2418faf2251..84125707f32 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1003,8 +1003,6 @@ static void fjn_rx(struct net_device *dev)
}
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
- pkt_len);
outb(F_SKP_PKT, ioaddr + RX_SKIP);
dev->stats.rx_dropped++;
break;
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 1c54e229e3c..e3881614539 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -798,16 +798,14 @@ static inline int i596_rx(struct net_device *dev)
#ifdef __mc68000__
cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
#endif
- }
- else
+ } else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
+ }
memory_squeeze:
if (skb == NULL) {
/* XXX tulip.c can defer packets here!! */
- printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
- }
- else {
+ } else {
if (!rx_in_place) {
/* 16 byte align the data fields */
skb_reserve(skb, 2);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f045ea4dc51..d653bac4cfc 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -715,14 +715,12 @@ static inline int i596_rx(struct net_device *dev)
rbd->v_data = newskb->data;
rbd->b_data = SWAP32(dma_addr);
DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
- } else
+ } else {
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ }
memory_squeeze:
if (skb == NULL) {
/* XXX tulip.c can defer packets here!! */
- printk(KERN_ERR
- "%s: i596_rx Memory squeeze, dropping packet.\n",
- dev->name);
dev->stats.rx_dropped++;
} else {
if (!rx_in_place) {
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 328f47c92e2..02963343447 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -402,7 +402,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
skb_arr_rq1[index] = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
- netdev_info(dev, "Unable to allocate enough skb in the array\n");
pr->rq1_skba.os_skbs = fill_wqes - i;
break;
}
@@ -432,10 +431,8 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
for (i = 0; i < nr_rq1a; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i]) {
- netdev_info(dev, "Not enough memory to allocate skb array\n");
+ if (!skb_arr_rq1[i])
break;
- }
}
/* Ring doorbell */
ehea_update_rq1a(pr->qp, i - 1);
@@ -695,10 +692,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
- if (!skb) {
- netdev_err(dev, "Not enough memory to allocate skb\n");
+ if (!skb)
break;
- }
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 1f7ecf57181..610ed223d1d 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -637,17 +637,12 @@ static int mal_probe(struct platform_device *ofdev)
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt =
- dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL);
+ mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (mal->bd_virt == NULL) {
- printk(KERN_ERR
- "mal%d: out of memory allocating RX/TX descriptors!\n",
- index);
err = -ENOMEM;
goto fail_unmap;
}
- memset(mal->bd_virt, 0, bd_size);
for (i = 0; i < mal->num_tx_chans; ++i)
set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c859771a990..302d5940106 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -556,11 +556,9 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
rxq_entries;
adapter->rx_queue.queue_addr =
- dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
- &adapter->rx_queue.queue_dma, GFP_KERNEL);
-
+ dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
+ &adapter->rx_queue.queue_dma, GFP_KERNEL);
if (!adapter->rx_queue.queue_addr) {
- netdev_err(netdev, "unable to allocate rx queue pages\n");
rc = -ENOMEM;
goto err_out;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 43462d596a4..a9f9c790676 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1020,12 +1020,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
}
- memset(txdr->desc, 0, txdr->size);
txdr->next_to_use = txdr->next_to_clean = 0;
ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
@@ -1075,12 +1074,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!rxdr->desc) {
ret_val = 5;
goto err_nomem;
}
- memset(rxdr->desc, 0, rxdr->size);
rxdr->next_to_use = rxdr->next_to_clean = 0;
rctl = er32(RCTL);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 8502c625dbe..d98e1d0996d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1516,8 +1516,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
if (!txdr->desc) {
setup_tx_desc_die:
vfree(txdr->buffer_info);
- e_err(probe, "Unable to allocate memory for the Tx descriptor "
- "ring\n");
return -ENOMEM;
}
@@ -1707,10 +1705,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL);
-
if (!rxdr->desc) {
- e_err(probe, "Unable to allocate memory for the Rx descriptor "
- "ring\n");
setup_rx_desc_die:
vfree(rxdr->buffer_info);
return -ENOMEM;
@@ -1729,8 +1724,6 @@ setup_rx_desc_die:
if (!rxdr->desc) {
dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
olddma);
- e_err(probe, "Unable to allocate memory for the Rx "
- "descriptor ring\n");
goto setup_rx_desc_die;
}
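
e1000_setup_rx_resources() keeps its label-based unwind while shedding the error messages: the first failure frees buffer_info and returns, and the realignment retry frees the old descriptors before jumping to the same label. The shape of that error path in miniature (struct and names hypothetical):

	#include <linux/dma-mapping.h>
	#include <linux/vmalloc.h>

	struct example_ring {			/* stand-in for the rx ring */
		void *desc;
		dma_addr_t dma;
		unsigned int size;
		void *buffer_info;
	};

	static int example_setup_ring(struct device *dev,
				      struct example_ring *r)
	{
		r->buffer_info = vzalloc(r->size);
		if (!r->buffer_info)
			return -ENOMEM;

		r->desc = dma_alloc_coherent(dev, r->size, &r->dma,
					     GFP_KERNEL);
		if (!r->desc) {
			vfree(r->buffer_info);	/* unwind in reverse order */
			return -ENOMEM;
		}
		return 0;
	}
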
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index e0991388664..b71c8502a2b 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -37,7 +37,9 @@
* "index + 5".
*/
static const u16 e1000_gg82563_cable_length_table[] = {
- 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF
+};
+
#define GG82563_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_gg82563_cable_length_table)
@@ -116,7 +118,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -393,7 +395,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
- udelay(200);
+ usleep_range(200, 400);
/* ...and verify the command was successful. */
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -403,17 +405,17 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return -E1000_ERR_PHY;
}
- udelay(200);
+ usleep_range(200, 400);
ret_val = e1000e_read_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
- udelay(200);
+ usleep_range(200, 400);
} else {
ret_val = e1000e_read_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
}
e1000_release_phy_80003es2lan(hw);
@@ -462,7 +464,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
- udelay(200);
+ usleep_range(200, 400);
/* ...and verify the command was successful. */
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -472,17 +474,17 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return -E1000_ERR_PHY;
}
- udelay(200);
+ usleep_range(200, 400);
ret_val = e1000e_write_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS &
+ offset, data);
- udelay(200);
+ usleep_range(200, 400);
} else {
ret_val = e1000e_write_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS &
+ offset, data);
}
e1000_release_phy_80003es2lan(hw);
@@ -580,7 +582,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n");
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
@@ -595,7 +597,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
/* Try once more */
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
}
@@ -666,14 +668,12 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
s32 ret_val;
if (hw->phy.media_type == e1000_media_type_copper) {
- ret_val = e1000e_get_speed_and_duplex_copper(hw,
- speed,
- duplex);
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
hw->phy.ops.cfg_on_link_up(hw);
} else {
ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
- speed,
- duplex);
+ speed,
+ duplex);
}
return ret_val;
@@ -754,9 +754,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
if (ret_val)
e_dbg("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
/* Disabling VLAN filtering */
e_dbg("Initializing the IEEE VLAN\n");
@@ -784,14 +784,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
reg_data = er32(TXDCTL(1));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(1), reg_data);
/* Enable retransmit on late collisions */
@@ -818,13 +818,12 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* default to true to enable the MDIC W/A */
hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET >>
- E1000_KMRNCTRLSTA_OFFSET_SHIFT,
- &i);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
if (!ret_val) {
if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
- E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
}
@@ -891,7 +890,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
- u32 ctrl_ext;
+ u32 reg;
u16 data;
ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
@@ -954,22 +953,19 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
}
/* Bypass Rx and Tx FIFO's */
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
- E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
- E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
+ data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+ E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
if (ret_val)
return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
- &data);
+ reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
if (ret_val)
return ret_val;
data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
- data);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
if (ret_val)
return ret_val;
@@ -982,9 +978,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
- ew32(CTRL_EXT, ctrl_ext);
+ reg = er32(CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+ ew32(CTRL_EXT, reg);
ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
if (ret_val)
@@ -1049,27 +1045,29 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
- 0xFFFF);
+ 0xFFFF);
if (ret_val)
return ret_val;
ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- &reg_data);
+ &reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- reg_data);
+ reg_data);
if (ret_val)
return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
- &reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ &reg_data);
if (ret_val)
return ret_val;
reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
- reg_data);
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
if (ret_val)
return ret_val;
@@ -1096,7 +1094,7 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
if (hw->phy.media_type == e1000_media_type_copper) {
ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
- &duplex);
+ &duplex);
if (ret_val)
return ret_val;
@@ -1125,9 +1123,10 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
u16 reg_data, reg_data2;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
- reg_data);
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
if (ret_val)
return ret_val;
@@ -1171,9 +1170,10 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
u32 i = 0;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
- reg_data);
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
if (ret_val)
return ret_val;
@@ -1220,7 +1220,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
return ret_val;
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1255,7 +1255,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
return ret_val;
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
+ E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1419,4 +1419,3 @@ const struct e1000_info e1000_es2_info = {
.phy_ops = &es2_phy_ops,
.nvm_ops = &es2_nvm_ops,
};
-
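
A change repeated across the e1000e files below is udelay() becoming usleep_range(min, max). Documentation/timers/timers-howto.txt recommends usleep_range() for waits of roughly 10us to 20ms in non-atomic context: udelay() spins the CPU, while usleep_range() actually sleeps and gives the timer subsystem a coalescing window. The conversion is only legal where the caller is allowed to sleep, which these PHY/NVM paths are. In brief:

	#include <linux/delay.h>

	static void example_settle_atomic(void)
	{
		udelay(200);		/* atomic context: must busy-wait */
	}

	static void example_settle_sleeping(void)
	{
		usleep_range(200, 400);	/* process context: sleep instead */
	}
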
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 2faffbde179..7380442a382 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -184,7 +184,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
default:
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
@@ -437,7 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
return ret_val;
phy->id = (u32)(phy_id << 16);
- udelay(20);
+ usleep_range(20, 40);
ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -482,7 +482,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
- udelay(50);
+ usleep_range(50, 100);
i++;
}
@@ -499,7 +499,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
- udelay(50);
+ usleep_range(50, 100);
}
if (i == fw_timeout) {
@@ -526,6 +526,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
ew32(SWSM, swsm);
}
+
/**
* e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
* @hw: pointer to the HW structure
@@ -846,9 +847,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
}
for (i = 0; i < words; i++) {
- eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
- ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
- E1000_NVM_RW_REG_START;
+ eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
+ ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
+ E1000_NVM_RW_REG_START);
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
if (ret_val)
@@ -875,8 +876,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
s32 timeout = PHY_CFG_TIMEOUT;
while (timeout) {
- if (er32(EEMNGCTL) &
- E1000_NVM_CFG_DONE_PORT_0)
+ if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
break;
usleep_range(1000, 2000);
timeout--;
@@ -1022,7 +1022,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
}
if (hw->nvm.type == e1000_nvm_flash_hw) {
- udelay(10);
+ usleep_range(10, 20);
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
ew32(CTRL_EXT, ctrl_ext);
@@ -1095,9 +1095,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
if (ret_val)
e_dbg("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
/* Disabling VLAN filtering */
e_dbg("Initializing the IEEE VLAN\n");
@@ -1122,9 +1122,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB |
- E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
@@ -1140,9 +1139,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
break;
default:
reg_data = er32(TXDCTL(1));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB |
- E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB |
+ E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(1), reg_data);
break;
}
@@ -1530,7 +1529,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
status = er32(STATUS);
er32(RXCW);
/* SYNCH bit and IV bit are sticky */
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
@@ -1633,7 +1632,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* the IV bit and restart Autoneg
*/
for (i = 0; i < AN_RETRY_COUNT; i++) {
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) &&
(rxcw & E1000_RXCW_C))
@@ -2066,4 +2065,3 @@ const struct e1000_info e1000_82583_info = {
.phy_ops = &e82_phy_ops_bm,
.nvm_ops = &e82571_nvm_ops,
};
-
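
In e1000_check_for_serdes_link_82571(), RXCW is read, the code waits, and RXCW is read again because the SYNCH and IV bits are sticky: the first read returns (and clears) the latched state, and only a later read reflects the live link condition. The idiom, reduced to a sketch (the register accessor is an assumed callback, not the driver's er32()):

	#include <linux/types.h>
	#include <linux/delay.h>

	static u32 example_read_sticky(u32 (*rd_rxcw)(void))
	{
		(void)rd_rxcw();	/* first read clears the latched bits */
		usleep_range(10, 20);
		return rd_rxcw();	/* now reflects the current state */
	}
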
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 85cb1a3b7cd..08e24dc3dc0 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -44,6 +44,8 @@
#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
#define E1000_EIAC_MASK_82574 0x01F00000
+#define E1000_IVAR_INT_ALLOC_VALID 0x8
+
/* Manageability Operation Mode mask */
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
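
82571.h gains E1000_IVAR_INT_ALLOC_VALID. On the 82574, the IVAR register maps the rx/tx queues and the "other" cause onto MSI-X vectors, and each allocation field carries a valid bit; presumably the new define is consumed by MSI-X vector setup elsewhere in this series. A hypothetical illustration of packing one field (the 4-bit-per-entry layout is an assumption for illustration, not taken from this patch):

	#include <linux/types.h>

	#define E1000_IVAR_INT_ALLOC_VALID	0x8

	static u32 example_ivar_set(u32 ivar, unsigned int entry, u32 vector)
	{
		u32 field = (vector & 0x3) | E1000_IVAR_INT_ALLOC_VALID;

		ivar &= ~(0xf << (entry * 4));	/* clear the old field */
		ivar |= field << (entry * 4);	/* install the new one */
		return ivar;
	}
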
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index fc3a4fe1ac7..b7c664f2a95 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -66,7 +66,7 @@
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
-#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_LSECCK 0x00001000
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
@@ -216,6 +216,8 @@
#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
@@ -239,12 +241,11 @@
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
-
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
@@ -311,6 +312,7 @@
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
/* Receive Checksum Control */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
@@ -400,7 +402,8 @@
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
-#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+/* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
@@ -583,13 +586,13 @@
#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
-#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
-#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
-#define E1000_NVM_RW_REG_START 1 /* Start operation */
-#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
-#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
-#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
-#define E1000_FLASH_UPDATES 2000
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */
+#define E1000_FLASH_UPDATES 2000
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index fcc758138b8..e371f771cf8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -61,7 +61,6 @@ struct e1000_info;
#define e_notice(format, arg...) \
netdev_notice(adapter->netdev, format, ## arg)
-
/* Interrupt modes, as used by the IntMode parameter */
#define E1000E_INT_MODE_LEGACY 0
#define E1000E_INT_MODE_MSI 1
@@ -239,9 +238,8 @@ struct e1000_adapter {
u16 tx_itr;
u16 rx_itr;
- /* Tx */
- struct e1000_ring *tx_ring /* One per active queue */
- ____cacheline_aligned_in_smp;
+ /* Tx - one ring per active queue */
+ struct e1000_ring *tx_ring ____cacheline_aligned_in_smp;
u32 tx_fifo_limit;
struct napi_struct napi;
@@ -487,8 +485,8 @@ extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
extern void e1000e_free_rx_resources(struct e1000_ring *ring);
extern void e1000e_free_tx_resources(struct e1000_ring *ring);
extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64
- *stats);
+ struct rtnl_link_stats64
+ *stats);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
@@ -558,12 +556,14 @@ static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
return hw->nvm.ops.update(hw);
}
-static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
return hw->nvm.ops.read(hw, offset, words, data);
}
-static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
return hw->nvm.ops.write(hw, offset, words, data);
}
@@ -597,7 +597,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw)
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
- udelay(50);
+ usleep_range(50, 100);
return i;
}
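
__ew32_prepare() above is a bounded poll that now sleeps between reads instead of spinning. The generic shape, as a sketch over an assumed predicate (the return convention, tries remaining with 0 meaning timeout, mirrors the driver's):

	#include <linux/types.h>
	#include <linux/delay.h>

	static s32 example_poll_until_clear(bool (*busy)(void *), void *ctx,
					    s32 tries)
	{
		while (busy(ctx) && --tries)
			usleep_range(50, 100);
		return tries;		/* 0 == timed out */
	}
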
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index f91a8f3f9d4..e835e7b95f8 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -40,7 +40,7 @@
#include "e1000.h"
-enum {NETDEV_STATS, E1000_STATS};
+enum { NETDEV_STATS, E1000_STATS };
struct e1000_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -121,6 +121,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
"Interrupt test (offline)", "Loopback test (offline)",
"Link test (on/offline)"
};
+
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
static int e1000_get_settings(struct net_device *netdev,
@@ -197,8 +198,7 @@ static int e1000_get_settings(struct net_device *netdev,
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if ((hw->phy.media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
- ETH_TP_MDI;
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
@@ -224,8 +224,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
/* Fiber NICs only allow 1000 gbps Full duplex */
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
- spd != SPEED_1000 &&
- dplx != DUPLEX_FULL) {
+ (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) {
goto err_inval;
}
@@ -298,12 +297,10 @@ static int e1000_set_settings(struct net_device *netdev,
hw->mac.autoneg = 1;
if (hw->phy.media_type == e1000_media_type_fiber)
hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg;
+ ADVERTISED_FIBRE | ADVERTISED_Autoneg;
else
hw->phy.autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ ADVERTISED_TP | ADVERTISED_Autoneg;
ecmd->advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
@@ -346,7 +343,7 @@ static void e1000_get_pauseparam(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
pause->autoneg =
- (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
if (hw->fc.current_mode == e1000_fc_rx_pause) {
pause->rx_pause = 1;
@@ -435,7 +432,7 @@ static void e1000_get_regs(struct net_device *netdev,
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
- adapter->pdev->device;
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@@ -503,8 +500,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
+ eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
@@ -515,7 +512,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
} else {
for (i = 0; i < last_word - first_word + 1; i++) {
ret_val = e1000_read_nvm(hw, first_word + i, 1,
- &eeprom_buff[i]);
+ &eeprom_buff[i]);
if (ret_val)
break;
}
@@ -553,7 +550,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
if (eeprom->len == 0)
return -EOPNOTSUPP;
- if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
+ if (eeprom->magic !=
+ (adapter->pdev->vendor | (adapter->pdev->device << 16)))
return -EFAULT;
if (adapter->flags & FLAG_READ_ONLY_NVM)
@@ -579,7 +577,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
ret_val = e1000_read_nvm(hw, last_word, 1,
- &eeprom_buff[last_word - first_word]);
+ &eeprom_buff[last_word - first_word]);
if (ret_val)
goto out;
@@ -618,8 +616,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000e_driver_name,
- sizeof(drvinfo->driver));
+ strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, e1000e_driver_version,
sizeof(drvinfo->version));
@@ -627,10 +624,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
* PCI-E controllers
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d-%d",
- (adapter->eeprom_vers & 0xF000) >> 12,
- (adapter->eeprom_vers & 0x0FF0) >> 4,
- (adapter->eeprom_vers & 0x000F));
+ "%d.%d-%d",
+ (adapter->eeprom_vers & 0xF000) >> 12,
+ (adapter->eeprom_vers & 0x0FF0) >> 4,
+ (adapter->eeprom_vers & 0x000F));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
@@ -756,7 +753,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
{
u32 pat, val;
static const u32 test[] = {
- 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+ };
for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
(test[pat] & write));
@@ -786,6 +784,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
}
return 0;
}
+
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
do { \
if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
@@ -813,16 +812,16 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
u32 wlock_mac = 0;
/* The status register is Read Only, so a write should fail.
- * Some bits that get toggled are ignored.
+ * Some bits that get toggled are ignored. There are several bits
+ * on newer hardware that are r/w.
*/
switch (mac->type) {
- /* there are several bits on newer hardware that are r/w */
case e1000_82571:
case e1000_82572:
case e1000_80003es2lan:
toggle = 0x7FFFF3FF;
break;
- default:
+ default:
toggle = 0x7FFFF033;
break;
}
@@ -928,7 +927,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16) NVM_SUM) && !(*data))
+ if ((checksum != (u16)NVM_SUM) && !(*data))
*data = 2;
return *data;
@@ -936,7 +935,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
{
- struct net_device *netdev = (struct net_device *) data;
+ struct net_device *netdev = (struct net_device *)data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -969,8 +968,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
netdev)) {
shared_int = 0;
- } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name,
+ netdev)) {
*data = 1;
ret_val = -1;
goto out;
@@ -1080,28 +1079,33 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
struct e1000_ring *tx_ring = &adapter->test_tx_ring;
struct e1000_ring *rx_ring = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
+ struct e1000_buffer *buffer_info;
int i;
if (tx_ring->desc && tx_ring->buffer_info) {
for (i = 0; i < tx_ring->count; i++) {
- if (tx_ring->buffer_info[i].dma)
+ buffer_info = &tx_ring->buffer_info[i];
+
+ if (buffer_info->dma)
dma_unmap_single(&pdev->dev,
- tx_ring->buffer_info[i].dma,
- tx_ring->buffer_info[i].length,
- DMA_TO_DEVICE);
- if (tx_ring->buffer_info[i].skb)
- dev_kfree_skb(tx_ring->buffer_info[i].skb);
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ if (buffer_info->skb)
+ dev_kfree_skb(buffer_info->skb);
}
}
if (rx_ring->desc && rx_ring->buffer_info) {
for (i = 0; i < rx_ring->count; i++) {
- if (rx_ring->buffer_info[i].dma)
+ buffer_info = &rx_ring->buffer_info[i];
+
+ if (buffer_info->dma)
dma_unmap_single(&pdev->dev,
- rx_ring->buffer_info[i].dma,
- 2048, DMA_FROM_DEVICE);
- if (rx_ring->buffer_info[i].skb)
- dev_kfree_skb(rx_ring->buffer_info[i].skb);
+ buffer_info->dma,
+ 2048, DMA_FROM_DEVICE);
+ if (buffer_info->skb)
+ dev_kfree_skb(buffer_info->skb);
}
}
@@ -1138,8 +1142,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->count = E1000_DEFAULT_TXD;
tx_ring->buffer_info = kcalloc(tx_ring->count,
- sizeof(struct e1000_buffer),
- GFP_KERNEL);
+ sizeof(struct e1000_buffer), GFP_KERNEL);
if (!tx_ring->buffer_info) {
ret_val = 1;
goto err_nomem;
@@ -1156,8 +1159,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
- ew32(TDBAH(0), ((u64) tx_ring->dma >> 32));
+ ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF));
+ ew32(TDBAH(0), ((u64)tx_ring->dma >> 32));
ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
ew32(TDH(0), 0);
ew32(TDT(0), 0);
@@ -1179,8 +1182,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[i].length = skb->len;
tx_ring->buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev,
tx_ring->buffer_info[i].dma)) {
ret_val = 4;
@@ -1200,8 +1203,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rx_ring->count = E1000_DEFAULT_RXD;
rx_ring->buffer_info = kcalloc(rx_ring->count,
- sizeof(struct e1000_buffer),
- GFP_KERNEL);
+ sizeof(struct e1000_buffer), GFP_KERNEL);
if (!rx_ring->buffer_info) {
ret_val = 5;
goto err_nomem;
@@ -1220,16 +1222,16 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rctl = er32(RCTL);
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
ew32(RCTL, rctl & ~E1000_RCTL_EN);
- ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
- ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
+ ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF));
+ ew32(RDBAH(0), ((u64)rx_ring->dma >> 32));
ew32(RDLEN(0), rx_ring->size);
ew32(RDH(0), 0);
ew32(RDT(0), 0);
rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
- E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
- E1000_RCTL_SBP | E1000_RCTL_SECRC |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
+ E1000_RCTL_SBP | E1000_RCTL_SECRC |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
ew32(RCTL, rctl);
for (i = 0; i < rx_ring->count; i++) {
@@ -1244,8 +1246,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->buffer_info[i].skb = skb;
rx_ring->buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data, 2048,
- DMA_FROM_DEVICE);
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev,
rx_ring->buffer_info[i].dma)) {
ret_val = 8;
@@ -1296,7 +1298,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl_reg);
e1e_flush();
- udelay(500);
+ usleep_range(500, 1000);
return 0;
}
@@ -1322,7 +1324,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
/* Assert SW reset for above settings to take effect */
hw->phy.ops.commit(hw);
- mdelay(1);
+ usleep_range(1000, 2000);
/* Force Full Duplex */
e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
@@ -1363,7 +1365,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
/* force 1000, set loopback */
e1e_wphy(hw, MII_BMCR, 0x4140);
- mdelay(250);
+ msleep(250);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = er32(CTRL);
@@ -1395,7 +1397,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
if (hw->phy.type == e1000_phy_m88)
e1000_phy_disable_receiver(adapter);
- udelay(500);
+ usleep_range(500, 1000);
return 0;
}
@@ -1431,8 +1433,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* special write to serdes control register to enable SerDes analog
* loopback
*/
-#define E1000_SERDES_LB_ON 0x410
- ew32(SCTL, E1000_SERDES_LB_ON);
+ ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
e1e_flush();
usleep_range(10000, 20000);
@@ -1526,8 +1527,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_82572:
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
-#define E1000_SERDES_LB_OFF 0x400
- ew32(SCTL, E1000_SERDES_LB_OFF);
+ ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
e1e_flush();
usleep_range(10000, 20000);
break;
@@ -1564,7 +1564,7 @@ static int e1000_check_lbtest_frame(struct sk_buff *skb,
frame_size &= ~1;
if (*(skb->data + 3) == 0xFF)
if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ (*(skb->data + frame_size / 2 + 12) == 0xAF))
return 0;
return 13;
}
@@ -1575,6 +1575,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
struct e1000_ring *rx_ring = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
int i, j, k, l;
int lc;
int good_cnt;
@@ -1595,14 +1596,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
k = 0;
l = 0;
- for (j = 0; j <= lc; j++) { /* loop count loop */
- for (i = 0; i < 64; i++) { /* send the packets */
- e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
- 1024);
+ /* loop count loop */
+ for (j = 0; j <= lc; j++) {
+ /* send the packets */
+ for (i = 0; i < 64; i++) {
+ buffer_info = &tx_ring->buffer_info[k];
+
+ e1000_create_lbtest_frame(buffer_info->skb, 1024);
dma_sync_single_for_device(&pdev->dev,
- tx_ring->buffer_info[k].dma,
- tx_ring->buffer_info[k].length,
- DMA_TO_DEVICE);
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
k++;
if (k == tx_ring->count)
k = 0;
@@ -1612,13 +1616,16 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
msleep(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
- do { /* receive the sent packets */
+ /* receive the sent packets */
+ do {
+ buffer_info = &rx_ring->buffer_info[l];
+
dma_sync_single_for_cpu(&pdev->dev,
- rx_ring->buffer_info[l].dma, 2048,
- DMA_FROM_DEVICE);
+ buffer_info->dma, 2048,
+ DMA_FROM_DEVICE);
- ret_val = e1000_check_lbtest_frame(
- rx_ring->buffer_info[l].skb, 1024);
+ ret_val = e1000_check_lbtest_frame(buffer_info->skb,
+ 1024);
if (!ret_val)
good_cnt++;
l++;
@@ -1637,7 +1644,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = 14; /* error code for time out error */
break;
}
- } /* end loop count loop */
+ }
return ret_val;
}
@@ -1696,7 +1703,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
/* On some Phy/switch combinations, link establishment
* can take a few seconds more than expected.
*/
- msleep(5000);
+ msleep_interruptible(5000);
if (!(er32(STATUS) & E1000_STATUS_LU))
*data = 1;
@@ -1980,12 +1987,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
- p = (char *) &net_stats +
- e1000_gstrings_stats[i].stat_offset;
+ p = (char *)&net_stats +
+ e1000_gstrings_stats[i].stat_offset;
break;
case E1000_STATS:
- p = (char *) adapter +
- e1000_gstrings_stats[i].stat_offset;
+ p = (char *)adapter +
+ e1000_gstrings_stats[i].stat_offset;
break;
default:
data[i] = 0;
@@ -1993,7 +2000,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
}
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
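
Three things worth noting in the ethtool.c hunks: the ad-hoc local defines E1000_SERDES_LB_ON/OFF are replaced by the E1000_SCTL_ENABLE/DISABLE_SERDES_LOOPBACK constants now in defines.h; the link test's msleep(5000) becomes msleep_interruptible(5000) so a signal can cut the wait short; and the descriptor loops hoist tx_ring->buffer_info[i] into a local struct e1000_buffer pointer. The hoist is cosmetic (the generated code is the same) but keeps the dma_unmap/sync calls inside 80 columns. In miniature (types stubbed for illustration):

	#include <linux/types.h>

	struct example_buffer {		/* stand-in for struct e1000_buffer */
		dma_addr_t dma;
		unsigned int length;
	};

	static void example_walk(struct example_buffer *arr, int count)
	{
		int i;

		for (i = 0; i < count; i++) {
			struct example_buffer *bi = &arr[i];	/* hoisted */

			if (bi->dma)
				bi->length = 0;
		}
	}
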
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 1e6b889aee8..84850f7a23e 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -167,7 +167,7 @@ enum e1000_1000t_rx_status {
e1000_1000t_rx_status_undefined = 0xFF
};
-enum e1000_rev_polarity{
+enum e1000_rev_polarity {
e1000_rev_polarity_normal = 0,
e1000_rev_polarity_reversed,
e1000_rev_polarity_undefined = 0xFF
@@ -545,7 +545,7 @@ struct e1000_mac_info {
u16 mta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
- #define MAX_MTA_REG 128
+#define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 121a865c7fb..1cdec5fd212 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -61,15 +61,15 @@
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
struct ich8_hsfsts {
- u16 flcdone :1; /* bit 0 Flash Cycle Done */
- u16 flcerr :1; /* bit 1 Flash Cycle Error */
- u16 dael :1; /* bit 2 Direct Access error Log */
- u16 berasesz :2; /* bit 4:3 Sector Erase Size */
- u16 flcinprog :1; /* bit 5 flash cycle in Progress */
- u16 reserved1 :2; /* bit 13:6 Reserved */
- u16 reserved2 :6; /* bit 13:6 Reserved */
- u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
- u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
+ u16 flcdone:1; /* bit 0 Flash Cycle Done */
+ u16 flcerr:1; /* bit 1 Flash Cycle Error */
+ u16 dael:1; /* bit 2 Direct Access error Log */
+ u16 berasesz:2; /* bit 4:3 Sector Erase Size */
+ u16 flcinprog:1; /* bit 5 flash cycle in Progress */
+ u16 reserved1:2; /* bit 13:6 Reserved */
+ u16 reserved2:6; /* bit 13:6 Reserved */
+ u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
+ u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
} hsf_status;
u16 regval;
};
@@ -78,11 +78,11 @@ union ich8_hws_flash_status {
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
struct ich8_hsflctl {
- u16 flcgo :1; /* 0 Flash Cycle Go */
- u16 flcycle :2; /* 2:1 Flash Cycle */
- u16 reserved :5; /* 7:3 Reserved */
- u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
- u16 flockdn :6; /* 15:10 Reserved */
+ u16 flcgo:1; /* 0 Flash Cycle Go */
+ u16 flcycle:2; /* 2:1 Flash Cycle */
+ u16 reserved:5; /* 7:3 Reserved */
+ u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
+ u16 flockdn:6; /* 15:10 Reserved */
} hsf_ctrl;
u16 regval;
};
@@ -90,10 +90,10 @@ union ich8_hws_flash_ctrl {
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
struct ich8_flracc {
- u32 grra :8; /* 0:7 GbE region Read Access */
- u32 grwa :8; /* 8:15 GbE region Write Access */
- u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
- u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
+ u32 grra:8; /* 0:7 GbE region Read Access */
+ u32 grwa:8; /* 8:15 GbE region Write Access */
+ u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
+ u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
} hsf_flregacc;
u16 regval;
};
@@ -312,7 +312,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
ew32(CTRL, mac_reg);
e1e_flush();
- udelay(10);
+ usleep_range(10, 20);
mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
ew32(CTRL, mac_reg);
e1e_flush();
@@ -548,8 +548,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* find total size of the NVM, then cut in half since the total
* size represents two separate NVM banks.
*/
- nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
- << FLASH_SECTOR_ADDR_SHIFT;
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
nvm->flash_bank_size /= 2;
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
@@ -1134,9 +1134,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
u32 fwsm;
fwsm = er32(FWSM);
- return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
- ((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+ return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
}
/**
@@ -1153,7 +1153,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
fwsm = er32(FWSM);
return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
- (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}
/**
@@ -1440,8 +1440,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
word_addr = (u16)(cnf_base_addr << 1);
for (i = 0; i < cnf_size; i++) {
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
- &reg_data);
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
if (ret_val)
goto release;
@@ -1501,13 +1500,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
if (ret_val)
goto release;
- status_reg &= BM_CS_STATUS_LINK_UP |
- BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_MASK;
+ status_reg &= (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
if (status_reg == (BM_CS_STATUS_LINK_UP |
- BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_1000))
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
k1_enable = false;
}
@@ -1516,13 +1515,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
if (ret_val)
goto release;
- status_reg &= HV_M_STATUS_LINK_UP |
- HV_M_STATUS_AUTONEG_COMPLETE |
- HV_M_STATUS_SPEED_MASK;
+ status_reg &= (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_MASK);
if (status_reg == (HV_M_STATUS_LINK_UP |
- HV_M_STATUS_AUTONEG_COMPLETE |
- HV_M_STATUS_SPEED_1000))
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_1000))
k1_enable = false;
}
@@ -1579,7 +1578,7 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
if (ret_val)
return ret_val;
- udelay(20);
+ usleep_range(20, 40);
ctrl_ext = er32(CTRL_EXT);
ctrl_reg = er32(CTRL);
@@ -1589,11 +1588,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
e1e_flush();
- udelay(20);
+ usleep_range(20, 40);
ew32(CTRL, ctrl_reg);
ew32(CTRL_EXT, ctrl_ext);
e1e_flush();
- udelay(20);
+ usleep_range(20, 40);
return 0;
}
@@ -1667,7 +1666,6 @@ release:
return ret_val;
}
-
/**
* e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
* @hw: pointer to the HW structure
@@ -1834,7 +1832,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
* SHRAL/H) and initial CRC values to the MAC
*/
for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
- u8 mac_addr[ETH_ALEN] = {0};
+ u8 mac_addr[ETH_ALEN] = { 0 };
u32 addr_high, addr_low;
addr_high = er32(RAH(i));
@@ -1865,8 +1863,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
ew32(RCTL, mac_reg);
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_CTRL_OFFSET,
- &data);
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
if (ret_val)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
@@ -1875,8 +1873,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_HD_CTRL,
- &data);
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
if (ret_val)
return ret_val;
data &= ~(0xF << 8);
@@ -1923,8 +1921,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
ew32(RCTL, mac_reg);
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_CTRL_OFFSET,
- &data);
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
if (ret_val)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
@@ -1933,8 +1931,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_HD_CTRL,
- &data);
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
if (ret_val)
return ret_val;
data &= ~(0xF << 8);
@@ -2100,7 +2098,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
do {
data = er32(STATUS);
data &= E1000_STATUS_LAN_INIT_DONE;
- udelay(100);
+ usleep_range(100, 200);
} while ((!data) && --loop);
/* If basic configuration is incomplete before the above loop
@@ -2445,7 +2443,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
/* Check bank 0 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
- &sig_byte);
+ &sig_byte);
if (ret_val)
return ret_val;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@@ -2456,8 +2454,8 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
/* Check bank 1 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
- bank1_offset,
- &sig_byte);
+ bank1_offset,
+ &sig_byte);
if (ret_val)
return ret_val;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@@ -2510,8 +2508,8 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
ret_val = 0;
for (i = 0; i < words; i++) {
- if (dev_spec->shadow_ram[offset+i].modified) {
- data[i] = dev_spec->shadow_ram[offset+i].value;
+ if (dev_spec->shadow_ram[offset + i].modified) {
+ data[i] = dev_spec->shadow_ram[offset + i].value;
} else {
ret_val = e1000_read_flash_word_ich8lan(hw,
act_offset + i,
@@ -2696,8 +2694,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
- flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
- hw->nvm.flash_base_addr;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
do {
udelay(1);
@@ -2714,8 +2712,9 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_READ_COMMAND_TIMEOUT);
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
/* Check if FCERR is set to 1, if set to 1, clear it
* and try the whole sequence a few more times, else
@@ -2774,8 +2773,8 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
nvm->ops.acquire(hw);
for (i = 0; i < words; i++) {
- dev_spec->shadow_ram[offset+i].modified = true;
- dev_spec->shadow_ram[offset+i].value = data[i];
+ dev_spec->shadow_ram[offset + i].modified = true;
+ dev_spec->shadow_ram[offset + i].value = data[i];
}
nvm->ops.release(hw);
@@ -2844,8 +2843,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
data = dev_spec->shadow_ram[i].value;
} else {
ret_val = e1000_read_flash_word_ich8lan(hw, i +
- old_bank_offset,
- &data);
+ old_bank_offset,
+ &data);
if (ret_val)
break;
}
@@ -2863,7 +2862,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
/* Convert offset to bytes. */
act_offset = (i + new_bank_offset) << 1;
- udelay(100);
+ usleep_range(100, 200);
/* Write the bytes to the new bank. */
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset,
@@ -2871,10 +2870,10 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
break;
- udelay(100);
+ usleep_range(100, 200);
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
- act_offset + 1,
- (u8)(data >> 8));
+ act_offset + 1,
+ (u8)(data >> 8));
if (ret_val)
break;
}
@@ -3050,8 +3049,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
- flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
- hw->nvm.flash_base_addr;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
do {
udelay(1);
@@ -3062,7 +3061,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
- hsflctl.hsf_ctrl.fldbcount = size -1;
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
@@ -3078,8 +3077,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
/* check if FCERR is set to 1 , if set to 1, clear it
* and try the whole sequence a few more times else done
*/
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
if (!ret_val)
break;
@@ -3138,7 +3138,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
for (program_retries = 0; program_retries < 100; program_retries++) {
e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
- udelay(100);
+ usleep_range(100, 200);
ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
if (!ret_val)
break;
@@ -3209,8 +3209,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
flash_linear_addr = hw->nvm.flash_base_addr;
flash_linear_addr += (bank) ? flash_bank_size : 0;
- for (j = 0; j < iteration ; j++) {
+ for (j = 0; j < iteration; j++) {
do {
+ u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
+
/* Steps */
ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val)
@@ -3230,8 +3232,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
flash_linear_addr += (j * sector_size);
ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+ ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
if (!ret_val)
break;
@@ -3270,8 +3271,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
return ret_val;
}
- if (*data == ID_LED_RESERVED_0000 ||
- *data == ID_LED_RESERVED_FFFF)
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
*data = ID_LED_DEFAULT_ICH8LAN;
return 0;
@@ -3511,9 +3511,9 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
if (ret_val)
e_dbg("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
/* Setup the receive address. */
e1000e_init_rx_addrs(hw, mac->rar_entry_count);
@@ -3541,16 +3541,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy for both queues */
txdctl = er32(TXDCTL(0));
- txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB;
- txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
- E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
ew32(TXDCTL(0), txdctl);
txdctl = er32(TXDCTL(1));
- txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB;
- txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
- E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
ew32(TXDCTL(1), txdctl);
/* ICH8 has opposite polarity of no_snoop bits.
@@ -3559,7 +3559,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
snoop = PCIE_ICH8_SNOOP_ALL;
else
- snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
+ snoop = (u32)~(PCIE_NO_SNOOP_ALL);
e1000e_set_pcie_no_snoop(hw, snoop);
ctrl_ext = er32(CTRL_EXT);
@@ -3575,6 +3575,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
return ret_val;
}
+
/**
* e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
* @hw: pointer to the HW structure
@@ -3686,8 +3687,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
*/
hw->fc.current_mode = hw->fc.requested_mode;
- e_dbg("After fix-ups FlowControl is now = %x\n",
- hw->fc.current_mode);
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
/* Continue to configure the copper link. */
ret_val = hw->mac.ops.setup_physical_interface(hw);
@@ -3737,12 +3737,12 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &reg_data);
+ &reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- reg_data);
+ reg_data);
if (ret_val)
return ret_val;
@@ -3815,8 +3815,7 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
return ret_val;
if ((hw->mac.type == e1000_ich8lan) &&
- (hw->phy.type == e1000_phy_igp_3) &&
- (*speed == SPEED_1000)) {
+ (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
}
@@ -3899,7 +3898,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
* /disabled - false).
**/
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
- bool state)
+ bool state)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
@@ -3981,12 +3980,12 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
return;
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
- &reg_data);
+ &reg_data);
if (ret_val)
return;
reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
- reg_data);
+ reg_data);
if (ret_val)
return;
reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
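
The ich8lan.c bitfield hunks are whitespace-only, reformatting "u16 flcdone :1;" to checkpatch's "u16 flcdone:1;" with no binary change. For reference, the register-overlay idiom these unions implement, as a hedged sketch (field widths illustrative, not the driver's exact layout):

	#include <linux/types.h>

	/* The same 16-bit storage can be accessed as a raw register
	 * value or through named fields.
	 */
	union example_flash_status {
		struct {
			u16 done:1;	/* cycle done */
			u16 err:1;	/* cycle error */
			u16 rsvd:13;
			u16 lockdn:1;	/* config lock-down */
		} bits;
		u16 regval;
	};
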
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b78e0217460..b25cc4300de 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -596,7 +596,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
* serdes media type.
*/
/* SYNCH bit and IV bit are sticky. */
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
@@ -613,7 +613,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
status = er32(STATUS);
if (status & E1000_STATUS_LU) {
/* SYNCH bit and IV bit are sticky, so reread rxcw. */
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
@@ -1382,7 +1382,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
- udelay(50);
+ usleep_range(50, 100);
i++;
}
@@ -1400,7 +1400,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
- udelay(50);
+ usleep_range(50, 100);
}
if (i == timeout) {
@@ -1712,7 +1712,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
while (timeout) {
if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
break;
- udelay(100);
+ usleep_range(100, 200);
timeout--;
}
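The mac.c changes above convert busy-wait udelay() calls to usleep_range(min, max), which lets the scheduler put the task to sleep anywhere inside the window instead of spinning the CPU. A rough user-space analogue of such a bounded-sleep polling loop, using nanosleep() since usleep_range() is kernel-only; hw_ready() is a stand-in for the real register poll.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Pretend the hardware becomes ready after a few polls. */
static bool hw_ready(int iteration)
{
        return iteration >= 3;
}

int main(void)
{
        /* ~50us between polls, analogous to usleep_range(50, 100) */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 };
        int i;

        for (i = 0; i < 100; i++) {
                if (hw_ready(i))
                        break;
                nanosleep(&ts, NULL);  /* sleep instead of spinning */
        }
        printf("ready after %d polls\n", i);
        return 0;
}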
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 948b86ffa4f..142ca39a68f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -219,9 +219,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n",
- netdev->name, netdev->state, netdev->trans_start,
- netdev->last_rx);
+ pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+ netdev->state, netdev->trans_start, netdev->last_rx);
}
/* Print Registers */
@@ -755,8 +754,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
cpu_to_le64(ps_page->dma);
}
- skb = __netdev_alloc_skb_ip_align(netdev,
- adapter->rx_ps_bsize0,
+ skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
gfp);
if (!skb) {
@@ -850,8 +848,8 @@ check_page:
if (!buffer_info->dma)
buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
- PAGE_SIZE,
+ buffer_info->page, 0,
+ PAGE_SIZE,
DMA_FROM_DEVICE);
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
@@ -937,10 +935,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
cleaned = true;
cleaned_count++;
- dma_unmap_single(&pdev->dev,
- buffer_info->dma,
- adapter->rx_buffer_len,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->wb.upper.length);
@@ -1068,8 +1064,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring,
static void e1000_print_hw_hang(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter,
- print_hang_task);
+ struct e1000_adapter,
+ print_hang_task);
struct net_device *netdev = adapter->netdev;
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int i = tx_ring->next_to_clean;
@@ -1082,8 +1078,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->state))
return;
- if (!adapter->tx_hang_recheck &&
- (adapter->flags2 & FLAG2_DMA_BURST)) {
+ if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
/* May be blocked on write-back; flush pending descriptor
 * writebacks to memory and detect again.
 */
@@ -1125,19 +1120,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
"PHY 1000BASE-T Status <%x>\n"
"PHY Extended Status <%x>\n"
"PCI Status <%x>\n",
- readl(tx_ring->head),
- readl(tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->upper.fields.status,
- er32(STATUS),
- phy_status,
- phy_1000t_status,
- phy_ext_status,
- pci_status);
+ readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
+ tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
+ eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
+ phy_status, phy_1000t_status, phy_ext_status, pci_status);
/* Suggest workaround for known h/w issue */
if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
@@ -1430,7 +1416,7 @@ copydone:
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
if (rx_desc->wb.upper.header_status &
- cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
+ cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
adapter->rx_hdr_split++;
e1000_receive_skb(adapter, netdev, skb, staterr,
@@ -1468,7 +1454,7 @@ next_desc:
* e1000_consume_page - helper function
**/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
+ u16 length)
{
bi->page = NULL;
skb->len += length;
@@ -1495,7 +1481,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
unsigned int i;
int cleaned_count = 0;
bool cleaned = false;
- unsigned int total_rx_bytes=0, total_rx_packets=0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct skb_shared_info *shinfo;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
@@ -1541,7 +1528,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
rx_ring->rx_skb_top = NULL;
goto next_desc;
}
-
#define rxtop (rx_ring->rx_skb_top)
if (!(staterr & E1000_RXD_STAT_EOP)) {
/* this descriptor is only the beginning (or middle) */
@@ -1549,12 +1535,13 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
+ 0, length);
} else {
/* this is the middle of a chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
+ shinfo = skb_shinfo(rxtop);
+ skb_fill_page_desc(rxtop, shinfo->nr_frags,
+ buffer_info->page, 0,
+ length);
/* re-use the skb, only consumed the page */
buffer_info->skb = skb;
}
@@ -1563,9 +1550,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
} else {
if (rxtop) {
/* end of the chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
+ shinfo = skb_shinfo(rxtop);
+ skb_fill_page_desc(rxtop, shinfo->nr_frags,
+ buffer_info->page, 0,
+ length);
/* re-use the current skb, we only consumed the
* page
*/
@@ -1590,10 +1578,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
+ buffer_info->page, 0,
+ length);
e1000_consume_page(buffer_info, skb,
- length);
+ length);
}
}
}
@@ -1666,8 +1654,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
dma_unmap_page(&pdev->dev, buffer_info->dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
+ PAGE_SIZE, DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
@@ -1720,7 +1707,8 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
static void e1000e_downshift_workaround(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, downshift_task);
+ struct e1000_adapter,
+ downshift_task);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -1913,7 +1901,6 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *tx_ring = adapter->tx_ring;
-
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -1970,7 +1957,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
ew32(RFCTL, rfctl);
}
-#define E1000_IVAR_INT_ALLOC_VALID 0x8
/* Configure Rx vector */
rx_ring->ims_val = E1000_IMS_RXQ0;
adapter->eiac_mask |= rx_ring->ims_val;
@@ -2045,8 +2031,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_HAS_MSIX) {
adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
adapter->msix_entries = kcalloc(adapter->num_vectors,
- sizeof(struct msix_entry),
- GFP_KERNEL);
+ sizeof(struct
+ msix_entry),
+ GFP_KERNEL);
if (adapter->msix_entries) {
for (i = 0; i < adapter->num_vectors; i++)
adapter->msix_entries[i].entry = i;
@@ -2490,7 +2477,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
switch (itr_setting) {
case lowest_latency:
/* handle TSO and jumbo frames */
- if (bytes/packets > 8000)
+ if (bytes / packets > 8000)
retval = bulk_latency;
else if ((packets < 5) && (bytes > 512))
retval = low_latency;
@@ -2498,13 +2485,13 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
- if (bytes/packets > 8000)
+ if (bytes / packets > 8000)
retval = bulk_latency;
- else if ((packets < 10) || ((bytes/packets) > 1200))
+ else if ((packets < 10) || ((bytes / packets) > 1200))
retval = bulk_latency;
else if ((packets > 35))
retval = lowest_latency;
- } else if (bytes/packets > 2000) {
+ } else if (bytes / packets > 2000) {
retval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
retval = lowest_latency;
@@ -2556,8 +2543,8 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
current_itr = max(adapter->rx_itr, adapter->tx_itr);
- switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
+ switch (current_itr) {
case lowest_latency:
new_itr = 70000;
break;
@@ -2578,8 +2565,7 @@ set_itr_now:
* increasing
*/
new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
+ min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
adapter->itr = new_itr;
adapter->rx_ring->itr_val = new_itr;
if (adapter->msix_entries)
@@ -2810,8 +2796,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
u16 vid = adapter->hw.mng_cookie.vlan_id;
u16 old_vid = adapter->mng_vlan_id;
- if (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
e1000_vlan_rx_add_vid(netdev, vid);
adapter->mng_vlan_id = vid;
}
@@ -2827,7 +2812,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
e1000_vlan_rx_add_vid(adapter->netdev, 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- e1000_vlan_rx_add_vid(adapter->netdev, vid);
+ e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
@@ -3002,8 +2987,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl = er32(RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* Do not Store bad packets */
rctl &= ~E1000_RCTL_SBP;
@@ -3089,19 +3074,17 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* Enable Packet split descriptors */
rctl |= E1000_RCTL_DTYP_PS;
- psrctl |= adapter->rx_ps_bsize0 >>
- E1000_PSRCTL_BSIZE0_SHIFT;
+ psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
switch (adapter->rx_ps_pages) {
case 3:
- psrctl |= PAGE_SIZE <<
- E1000_PSRCTL_BSIZE3_SHIFT;
+ psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
+ /* fall-through */
case 2:
- psrctl |= PAGE_SIZE <<
- E1000_PSRCTL_BSIZE2_SHIFT;
+ psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
+ /* fall-through */
case 1:
- psrctl |= PAGE_SIZE >>
- E1000_PSRCTL_BSIZE1_SHIFT;
+ psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
break;
}
@@ -3275,7 +3258,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev)
/* update_mc_addr_list expects a packed array of only addresses. */
i = 0;
netdev_for_each_mc_addr(ha, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -3752,8 +3735,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (adapter->max_frame_size +
- sizeof(struct e1000_tx_desc) -
- ETH_FCS_LEN) * 2;
+ sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
min_tx_space = ALIGN(min_tx_space, 1024);
min_tx_space >>= 10;
/* software strips receive CRC, so leave room for it */
@@ -3856,13 +3838,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
if ((adapter->max_frame_size * 2) > (pba << 10)) {
if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
dev_info(&adapter->pdev->dev,
- "Interrupt Throttle Rate turned off\n");
+ "Interrupt Throttle Rate off\n");
adapter->flags2 |= FLAG2_DISABLE_AIM;
e1000e_write_itr(adapter, 0);
}
} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
dev_info(&adapter->pdev->dev,
- "Interrupt Throttle Rate turned on\n");
+ "Interrupt Throttle Rate on\n");
adapter->flags2 &= ~FLAG2_DISABLE_AIM;
adapter->itr = 20000;
e1000e_write_itr(adapter, adapter->itr);
@@ -4261,8 +4243,7 @@ static int e1000_open(struct net_device *netdev)
e1000e_power_up_phy(adapter);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- if ((adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
+ if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to workaround jumbo issue */
@@ -4365,8 +4346,7 @@ static int e1000_close(struct net_device *netdev)
/* kill manageability vlan ID if supported, but not if a vlan with
* the same ID is registered on the host OS (let 8021q kill it)
*/
- if (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
+ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
/* If AMT is enabled, let the firmware know that the network
@@ -4382,6 +4362,7 @@ static int e1000_close(struct net_device *netdev)
return 0;
}
+
/**
* e1000_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -4432,7 +4413,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
static void e1000e_update_phy_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, update_phy_task);
+ struct e1000_adapter,
+ update_phy_task);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -4449,7 +4431,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
**/
static void e1000_update_phy_info(unsigned long data)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -4616,18 +4598,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
* our own version based on RUC and ROC
*/
netdev->stats.rx_errors = adapter->stats.rxerrc +
- adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.ruc + adapter->stats.roc +
- adapter->stats.cexterr;
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
netdev->stats.rx_length_errors = adapter->stats.ruc +
- adapter->stats.roc;
+ adapter->stats.roc;
netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
netdev->stats.rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */
- netdev->stats.tx_errors = adapter->stats.ecol +
- adapter->stats.latecol;
+ netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
netdev->stats.tx_aborted_errors = adapter->stats.ecol;
netdev->stats.tx_window_errors = adapter->stats.latecol;
netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
@@ -4785,7 +4765,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
**/
static void e1000_watchdog(unsigned long data)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -4796,7 +4776,8 @@ static void e1000_watchdog(unsigned long data)
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, watchdog_task);
+ struct e1000_adapter,
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -4830,8 +4811,8 @@ static void e1000_watchdog_task(struct work_struct *work)
/* update snapshot of PHY registers on LSC */
e1000_phy_read_status(adapter);
mac->ops.get_link_up_info(&adapter->hw,
- &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_speed,
+ &adapter->link_duplex);
e1000_print_link_info(adapter);
/* check if SmartSpeed worked */
@@ -4944,7 +4925,7 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
- LINK_TIMEOUT);
+ LINK_TIMEOUT);
}
}
@@ -4979,8 +4960,8 @@ link_up:
*/
u32 goc = (adapter->gotc + adapter->gorc) / 10000;
u32 dif = (adapter->gotc > adapter->gorc ?
- adapter->gotc - adapter->gorc :
- adapter->gorc - adapter->gotc) / 10000;
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
e1000e_write_itr(adapter, itr);
@@ -5059,14 +5040,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
iph->tot_len = 0;
iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- 0, IPPROTO_TCP, 0);
+ 0, IPPROTO_TCP, 0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
@@ -5075,7 +5056,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
- E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+ E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
i = tx_ring->next_to_use;
context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
@@ -5145,8 +5126,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
context_desc->lower_setup.ip_config = 0;
context_desc->upper_setup.tcp_fields.tucss = css;
- context_desc->upper_setup.tcp_fields.tucso =
- css + skb->csum_offset;
+ context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(cmd_len);
@@ -5219,7 +5199,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
- offset, size, DMA_TO_DEVICE);
+ offset, size,
+ DMA_TO_DEVICE);
buffer_info->mapped_as_page = true;
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -5268,7 +5249,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
if (tx_flags & E1000_TX_FLAGS_TSO) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
- E1000_TXD_CMD_TSE;
+ E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
if (tx_flags & E1000_TX_FLAGS_IPV4)
@@ -5299,8 +5280,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
buffer_info = &tx_ring->buffer_info[i];
tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
- tx_desc->lower.data =
- cpu_to_le32(txd_lower | buffer_info->length);
+ tx_desc->lower.data = cpu_to_le32(txd_lower |
+ buffer_info->length);
tx_desc->upper.data = cpu_to_le32(txd_upper);
i++;
@@ -5350,11 +5331,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
return 0;
- if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
+ if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
return 0;
{
- const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
+ const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
struct udphdr *udp;
if (ip->protocol != IPPROTO_UDP)
@@ -5579,7 +5560,7 @@ static void e1000_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure.
**/
struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+ struct rtnl_link_stats64 *stats)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5600,18 +5581,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
* our own version based on RUC and ROC
*/
stats->rx_errors = adapter->stats.rxerrc +
- adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.ruc + adapter->stats.roc +
- adapter->stats.cexterr;
- stats->rx_length_errors = adapter->stats.ruc +
- adapter->stats.roc;
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
+ stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
stats->rx_crc_errors = adapter->stats.crcerrs;
stats->rx_frame_errors = adapter->stats.algnerrc;
stats->rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */
- stats->tx_errors = adapter->stats.ecol +
- adapter->stats.latecol;
+ stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
stats->tx_aborted_errors = adapter->stats.ecol;
stats->tx_window_errors = adapter->stats.latecol;
stats->tx_carrier_errors = adapter->stats.tncrs;
@@ -5680,9 +5658,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* adjust allocation if LPE protects us, and we aren't using SBP */
if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
- (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+ (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
- + ETH_FCS_LEN;
+ + ETH_FCS_LEN;
if (netif_running(netdev))
e1000e_up(adapter);
@@ -5861,7 +5839,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
phy_reg &= ~(BM_RCTL_MO_MASK);
if (mac_reg & E1000_RCTL_MO_3)
phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
- << BM_RCTL_MO_SHIFT);
+ << BM_RCTL_MO_SHIFT);
if (mac_reg & E1000_RCTL_BAM)
phy_reg |= BM_RCTL_BAM;
if (mac_reg & E1000_RCTL_PMCF)
@@ -5930,10 +5908,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
}
ctrl = er32(CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
@@ -5977,8 +5951,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
*/
e1000e_release_hw_control(adapter);
- pci_clear_master(pdev);
-
/* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
@@ -6077,24 +6049,24 @@ static int __e1000_resume(struct pci_dev *pdev)
e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
if (phy_data) {
e_info("PHY Wakeup cause - %s\n",
- phy_data & E1000_WUS_EX ? "Unicast Packet" :
- phy_data & E1000_WUS_MC ? "Multicast Packet" :
- phy_data & E1000_WUS_BC ? "Broadcast Packet" :
- phy_data & E1000_WUS_MAG ? "Magic Packet" :
- phy_data & E1000_WUS_LNKC ?
- "Link Status Change" : "other");
+ phy_data & E1000_WUS_EX ? "Unicast Packet" :
+ phy_data & E1000_WUS_MC ? "Multicast Packet" :
+ phy_data & E1000_WUS_BC ? "Broadcast Packet" :
+ phy_data & E1000_WUS_MAG ? "Magic Packet" :
+ phy_data & E1000_WUS_LNKC ?
+ "Link Status Change" : "other");
}
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
u32 wus = er32(WUS);
if (wus) {
e_info("MAC Wakeup cause - %s\n",
- wus & E1000_WUS_EX ? "Unicast Packet" :
- wus & E1000_WUS_MC ? "Multicast Packet" :
- wus & E1000_WUS_BC ? "Broadcast Packet" :
- wus & E1000_WUS_MAG ? "Magic Packet" :
- wus & E1000_WUS_LNKC ? "Link Status Change" :
- "other");
+ wus & E1000_WUS_EX ? "Unicast Packet" :
+ wus & E1000_WUS_MC ? "Multicast Packet" :
+ wus & E1000_WUS_BC ? "Broadcast Packet" :
+ wus & E1000_WUS_MAG ? "Magic Packet" :
+ wus & E1000_WUS_LNKC ? "Link Status Change" :
+ "other");
}
ew32(WUS, ~0);
}
@@ -6369,7 +6341,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
e_info("(PCI Express:2.5GT/s:%s) %pM\n",
/* bus width */
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
- "Width x1"),
+ "Width x1"),
/* MAC address */
netdev->dev_addr);
e_info("Intel(R) PRO/%s Network Connection\n",
@@ -6479,7 +6451,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
resource_size_t flash_start, flash_len;
static int cards_found;
u16 aspm_disable_flag = 0;
- int i, err, pci_using_dac;
+ int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
@@ -6506,15 +6478,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
goto err_dma;
}
}
}
- err = pci_request_selected_regions_exclusive(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM),
- e1000e_driver_name);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions_exclusive(pdev, bars,
+ e1000e_driver_name);
if (err)
goto err_pci_reg;
@@ -6683,11 +6656,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = e1000_watchdog;
- adapter->watchdog_timer.data = (unsigned long) adapter;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
init_timer(&adapter->phy_info_timer);
adapter->phy_info_timer.function = e1000_update_phy_info;
- adapter->phy_info_timer.data = (unsigned long) adapter;
+ adapter->phy_info_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->reset_task, e1000_reset_task);
INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
@@ -6795,7 +6768,7 @@ err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -6865,7 +6838,7 @@ static void e1000_remove(struct pci_dev *pdev)
if (adapter->hw.flash_address)
iounmap(adapter->hw.flash_address);
pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_select_bars(pdev, IORESOURCE_MEM));
free_netdev(netdev);
@@ -6886,7 +6859,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
+ board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
@@ -6962,8 +6936,8 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
- SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
- e1000_runtime_resume, e1000_idle)
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
+ e1000_idle)
};
#endif
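Several netdev.c hunks above only reflow container_of() calls in deferred-work handlers: the handler is handed a pointer to the embedded work_struct and recovers the enclosing adapter from it. A self-contained sketch of that recovery, with a toy adapter type standing in for struct e1000_adapter.

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of(), as in the kernel. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

/* Toy stand-in for struct e1000_adapter: the embedded work item is
 * all a work handler receives, so it recovers the adapter with
 * container_of() -- the pattern the watchdog/update-phy hunks reflow. */
struct adapter {
        int id;
        struct work_struct watchdog_task;
};

static void watchdog_task_fn(struct work_struct *work)
{
        struct adapter *adapter = container_of(work, struct adapter,
                                               watchdog_task);
        printf("adapter %d\n", adapter->id);
}

int main(void)
{
        struct adapter a = { .id = 7 };

        watchdog_task_fn(&a.watchdog_task);
        return 0;
}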
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 84fecc26816..44ddc0a0ee0 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -630,7 +630,7 @@ void e1000e_reload_nvm_generic(struct e1000_hw *hw)
{
u32 ctrl_ext;
- udelay(10);
+ usleep_range(10, 20);
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
ew32(CTRL_EXT, ctrl_ext);
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 98da75dff93..c16bd75b6ca 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -45,7 +45,7 @@
unsigned int copybreak = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
- "Maximum size of packet that is copied to a new buffer on receive");
+ "Maximum size of packet that is copied to a new buffer on receive");
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
@@ -143,7 +143,8 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
*
* Default Value: 1 (enabled)
*/
-E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+E1000_PARAM(WriteProtectNVM,
+ "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
/* Enable CRC Stripping
*
@@ -160,13 +161,18 @@ struct e1000_option {
const char *err;
int def;
union {
- struct { /* range_option info */
+ /* range_option info */
+ struct {
int min;
int max;
} r;
- struct { /* list_option info */
+ /* list_option info */
+ struct {
int nr;
- struct e1000_opt_list { int i; char *str; } *p;
+ struct e1000_opt_list {
+ int i;
+ char *str;
+ } *p;
} l;
} arg;
};
@@ -246,7 +252,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
"Using defaults for all values\n");
}
- { /* Transmit Interrupt Delay */
+ /* Transmit Interrupt Delay */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
@@ -265,7 +272,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->tx_int_delay = opt.def;
}
}
- { /* Transmit Absolute Interrupt Delay */
+ /* Transmit Absolute Interrupt Delay */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
@@ -284,7 +292,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->tx_abs_int_delay = opt.def;
}
}
- { /* Receive Interrupt Delay */
+ /* Receive Interrupt Delay */
+ {
static struct e1000_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
@@ -303,7 +312,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->rx_int_delay = opt.def;
}
}
- { /* Receive Absolute Interrupt Delay */
+ /* Receive Absolute Interrupt Delay */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
@@ -322,7 +332,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->rx_abs_int_delay = opt.def;
}
}
- { /* Interrupt Throttling Rate */
+ /* Interrupt Throttling Rate */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
@@ -392,7 +403,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
break;
}
}
- { /* Interrupt Mode */
+ /* Interrupt Mode */
+ {
static struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Mode",
@@ -435,7 +447,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
kfree(opt.err);
#endif
}
- { /* Smart Power Down */
+ /* Smart Power Down */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "PHY Smart Power Down",
@@ -450,7 +463,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->flags |= FLAG_SMART_POWER_DOWN;
}
}
- { /* CRC Stripping */
+ /* CRC Stripping */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
@@ -470,27 +484,28 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
}
}
- { /* Kumeran Lock Loss Workaround */
+ /* Kumeran Lock Loss Workaround */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
+ bool enabled = opt.def;
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
- if (hw->mac.type == e1000_ich8lan)
- e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
- kmrn_lock_loss);
- } else {
- if (hw->mac.type == e1000_ich8lan)
- e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
- opt.def);
+ enabled = kmrn_lock_loss;
}
+
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
+ enabled);
}
- { /* Write-protect NVM */
+ /* Write-protect NVM */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "Write-protect NVM",
@@ -500,7 +515,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
if (num_WriteProtectNVM > bd) {
- unsigned int write_protect_nvm = WriteProtectNVM[bd];
+ unsigned int write_protect_nvm =
+ WriteProtectNVM[bd];
e1000_validate_option(&write_protect_nvm, &opt,
adapter);
if (write_protect_nvm)
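The Kumeran hunk above folds two duplicated calls into one: it derives a single enabled value (the default, possibly overridden by the validated module parameter) and then calls the workaround setter once. A compressed sketch of that refactor shape, with invented names.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the workaround setter. */
static void apply_workaround(bool state)
{
        printf("workaround %s\n", state ? "enabled" : "disabled");
}

int main(int argc, char **argv)
{
        bool enabled = true;            /* default, like opt.def */

        if (argc > 1)                   /* user supplied a parameter */
                enabled = (argv[1][0] != '0');

        /* one call site, instead of duplicating it in both branches */
        apply_workaround(enabled);
        return 0;
}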
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0930c136aa3..60dbf022e98 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -37,7 +37,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
- 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED
+};
+
#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_m88_cable_length_table)
@@ -49,7 +51,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
- 124};
+ 124
+};
+
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_igp_2_cable_length_table)
@@ -67,8 +71,7 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
manc = er32(MANC);
- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
- E1000_BLK_PHY_RESET : 0;
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
}
/**
@@ -94,7 +97,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
return ret_val;
phy->id = (u32)(phy_id << 16);
- udelay(20);
+ usleep_range(20, 40);
ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -162,7 +165,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
+ usleep_range(50, 100);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -175,13 +178,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
e_dbg("MDI Error\n");
return -E1000_ERR_PHY;
}
- *data = (u16) mdic;
+ *data = (u16)mdic;
/* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- udelay(100);
+ usleep_range(100, 200);
return 0;
}
@@ -220,7 +223,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
+ usleep_range(50, 100);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -238,7 +241,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- udelay(100);
+ usleep_range(100, 200);
return 0;
}
@@ -324,7 +327,7 @@ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
* semaphores before exiting.
**/
static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
- bool locked)
+ bool locked)
{
s32 ret_val = 0;
@@ -391,7 +394,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
* at the offset. Release any acquired semaphores before exiting.
**/
static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
- bool locked)
+ bool locked)
{
s32 ret_val = 0;
@@ -410,8 +413,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
(u16)offset);
if (!ret_val)
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
- offset,
- data);
+ offset, data);
if (!locked)
hw->phy.ops.release(hw);
@@ -458,7 +460,7 @@ s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
* Release any acquired semaphores before exiting.
**/
static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
- bool locked)
+ bool locked)
{
u32 kmrnctrlsta;
@@ -531,7 +533,7 @@ s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
* before exiting.
**/
static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
- bool locked)
+ bool locked)
{
u32 kmrnctrlsta;
@@ -772,8 +774,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
phy_data |= M88E1000_EPSCR_TX_CLK_25;
- if ((phy->revision == 2) &&
- (phy->id == M88E1111_I_PHY_ID)) {
+ if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) {
/* 82573L PHY - set the downshift counter to 5x. */
phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
@@ -1296,7 +1297,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
@@ -1319,7 +1320,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
/* Try once more */
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
}
@@ -1609,9 +1610,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
if (!ret_val)
- phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -1653,9 +1654,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, offset, &data);
if (!ret_val)
- phy->cable_polarity = (data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -1685,9 +1686,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, offset, &phy_data);
if (!ret_val)
- phy->cable_polarity = (phy_data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((phy_data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -1733,7 +1734,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
* Polls the PHY status register for link, 'iterations' number of times.
**/
s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success)
+ u32 usec_interval, bool *success)
{
s32 ret_val = 0;
u16 i, phy_status;
@@ -1756,7 +1757,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
- mdelay(usec_interval/1000);
+ mdelay(usec_interval / 1000);
else
udelay(usec_interval);
}
@@ -1791,8 +1792,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
return -E1000_ERR_PHY;
@@ -1824,10 +1825,10 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
- IGP02E1000_PHY_AGC_A,
- IGP02E1000_PHY_AGC_B,
- IGP02E1000_PHY_AGC_C,
- IGP02E1000_PHY_AGC_D
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
};
/* Read the AGC registers for all channels */
@@ -1841,8 +1842,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
* that can be put into the lookup table to obtain the
* approximate cable length.
*/
- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
- IGP02E1000_AGC_LENGTH_MASK;
+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK);
/* Array index bound check. */
if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
@@ -1865,8 +1866,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
/* Calculate cable length with the error range of +/- 10 meters. */
- phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
- (agc_value - IGP02E1000_AGC_RANGE) : 0;
+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0);
phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
@@ -2040,9 +2041,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
return ret_val;
} else {
/* Polarity is forced */
- phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
}
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
@@ -2119,7 +2120,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
ew32(CTRL, ctrl);
e1e_flush();
- udelay(150);
+ usleep_range(150, 300);
phy->ops.release(hw);
@@ -2375,13 +2376,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
- (page << page_shift));
+ (page << page_shift));
if (ret_val)
goto release;
}
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ data);
release:
hw->phy.ops.release(hw);
@@ -2433,13 +2434,13 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
- (page << page_shift));
+ (page << page_shift));
if (ret_val)
goto release;
}
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ data);
release:
hw->phy.ops.release(hw);
return ret_val;
@@ -2674,7 +2675,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
if (read) {
/* Read the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
- data);
+ data);
} else {
/* Write the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
@@ -2763,7 +2764,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
if (page > 0 && page < HV_INTC_FC_PAGE_START) {
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
- data, true);
+ data, true);
goto out;
}
@@ -2786,8 +2787,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
page << IGP_PAGE_SHIFT, reg);
- ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
- data);
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
out:
if (!locked)
hw->phy.ops.release(hw);
@@ -2871,7 +2871,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if (page > 0 && page < HV_INTC_FC_PAGE_START) {
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
- &data, false);
+ &data, false);
goto out;
}
@@ -2910,7 +2910,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
page << IGP_PAGE_SHIFT, reg);
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
- data);
+ data);
out:
if (!locked)
@@ -2988,15 +2988,15 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
* These accesses done with PHY address 2 and without using pages.
**/
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
- u16 *data, bool read)
+ u16 *data, bool read)
{
s32 ret_val;
u32 addr_reg;
u32 data_reg;
/* This takes care of the difference with desktop vs mobile phy */
- addr_reg = (hw->phy.type == e1000_phy_82578) ?
- I82578_ADDR_REG : I82577_ADDR_REG;
+ addr_reg = ((hw->phy.type == e1000_phy_82578) ?
+ I82578_ADDR_REG : I82577_ADDR_REG);
data_reg = addr_reg + 1;
/* All operations in this function are phy address 2 */
@@ -3050,8 +3050,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_MASK;
+ data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_1000))
@@ -3086,9 +3086,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (!ret_val)
- phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -3215,8 +3215,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
- I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
return -E1000_ERR_PHY;
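The cable-length code above shifts and masks an index out of a PHY status word, bounds-checks it, and only then indexes a lookup table. A minimal sketch of that pattern; the mask, shift, and table contents here are invented, not the real M88/I82577 values.

#include <stdint.h>
#include <stdio.h>

static const uint16_t cable_length[] = { 0, 50, 80, 110, 140, 140 };

/* Hypothetical field layout, for illustration only. */
#define LEN_MASK   0x0380u
#define LEN_SHIFT  7

static int lookup_length(uint16_t phy_data)
{
        unsigned int index = (phy_data & LEN_MASK) >> LEN_SHIFT;

        /* array index bound check before the table lookup */
        if (index >= sizeof(cable_length) / sizeof(cable_length[0]))
                return -1;      /* out of range: report a PHY error */
        return cable_length[index];
}

int main(void)
{
        printf("%d\n", lookup_length(0x0100));
        return 0;
}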
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d60cd439341..bea46bb2606 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -447,7 +447,6 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
-
if (!tx_ring->desc)
goto err;
@@ -488,7 +487,6 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
-
if (!rx_ring->desc)
goto err;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index ea480837343..74464c34845 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -717,14 +717,11 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = ALIGN(txdr->size, 4096);
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!txdr->desc) {
vfree(txdr->buffer_info);
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate transmit descriptor memory\n");
return -ENOMEM;
}
- memset(txdr->desc, 0, txdr->size);
txdr->next_to_use = 0;
txdr->next_to_clean = 0;
@@ -807,8 +804,6 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate receive descriptors\n");
return -ENOMEM;
}
memset(rxdr->desc, 0, rxdr->size);
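The txdr hunk above swaps a separate memset() for a zeroing allocation flag (GFP_KERNEL | __GFP_ZERO). The same trade in user-space terms is malloc() plus memset() versus a single calloc().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        size_t size = 4096;

        /* before: allocate, then clear separately */
        void *a = malloc(size);
        if (a)
                memset(a, 0, size);

        /* after: one zeroing allocation, like __GFP_ZERO */
        void *b = calloc(1, size);

        printf("%p %p\n", a, b);
        free(a);
        free(b);
        return 0;
}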
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index db5611ae407..e56a3d169e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7007,7 +7007,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
int err;
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
- return -EOPNOTSUPP;
+ return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
/* Hardware does not support aging addresses so if a
* ndm_state is given only allow permanent addresses
@@ -7038,44 +7038,6 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int err = -EOPNOTSUPP;
-
- if (ndm->ndm_state & NUD_PERMANENT) {
- pr_info("%s: FDB only supports static addresses\n",
- ixgbe_driver_name);
- return -EINVAL;
- }
-
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (is_unicast_ether_addr(addr))
- err = dev_uc_del(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_del(dev, addr);
- else
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- int idx)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
-
- return idx;
-}
-
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh)
{
@@ -7171,8 +7133,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_features = ixgbe_set_features,
.ndo_fix_features = ixgbe_fix_features,
.ndo_fdb_add = ixgbe_ndo_fdb_add,
- .ndo_fdb_del = ixgbe_ndo_fdb_del,
- .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
.ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
};
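With ixgbe_ndo_fdb_del and ixgbe_ndo_fdb_dump deleted, the core's default FDB handlers take over, and fdb_add now falls back to ndo_dflt_fdb_add() instead of failing with -EOPNOTSUPP when SR-IOV is off. A toy sketch of that delegate-to-default shape, with invented function names.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the generic core handler. */
static int default_fdb_add(const char *addr)
{
        printf("core handles %s\n", addr);
        return 0;
}

static int driver_fdb_add(bool sriov_enabled, const char *addr)
{
        /* no special handling needed: defer to the default instead
         * of returning an error */
        if (!sriov_enabled)
                return default_fdb_add(addr);

        printf("driver handles %s\n", addr);
        return 0;
}

int main(void)
{
        driver_fdb_add(false, "00:11:22:33:44:55");
        driver_fdb_add(true, "00:11:22:33:44:55");
        return 0;
}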
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fc0af9a3bb3..fff0d986752 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -44,8 +44,8 @@ struct ixgbevf_tx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
unsigned long time_stamp;
+ union ixgbe_adv_tx_desc *next_to_watch;
u16 length;
- u16 next_to_watch;
u16 mapped_as_page;
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index c3db6cd69b6..ac0c315659d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -76,12 +76,9 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgbevf_pci_tbl[] = {
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
- board_82599_vf},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
- board_X540_vf},
-
+static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
/* required last entry */
{0, }
};
@@ -190,28 +187,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_adapter *adapter = q_vector->adapter;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int i, eop, count = 0;
+ unsigned int i, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
if (test_bit(__IXGBEVF_DOWN, &adapter->state))
return true;
i = tx_ring->next_to_clean;
- eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ eop_desc = tx_buffer_info->next_to_watch;
- while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
+ do {
bool cleaned = false;
- rmb(); /* read buffer_info after eop_desc */
- /* eop could change between read and DD-check */
- if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
- goto cont_loop;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer_info->next_to_watch = NULL;
+
for ( ; !cleaned; count++) {
struct sk_buff *skb;
tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- cleaned = (i == eop);
+ cleaned = (tx_desc == eop_desc);
skb = tx_buffer_info->skb;
if (cleaned && skb) {
@@ -234,12 +240,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
i++;
if (i == tx_ring->count)
i = 0;
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
}
-cont_loop:
- eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
- }
+ eop_desc = tx_buffer_info->next_to_watch;
+ } while (count < tx_ring->count);
tx_ring->next_to_clean = i;
@@ -2417,9 +2423,6 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
- hw_dbg(&adapter->hw,
- "Unable to allocate memory for "
- "the receive descriptor ring\n");
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
goto alloc_failed;
@@ -2806,8 +2809,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
}
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags,
- unsigned int first)
+ struct sk_buff *skb, u32 tx_flags)
{
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int len;
@@ -2832,7 +2834,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
size, DMA_TO_DEVICE);
if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->next_to_watch = i;
len -= size;
total -= size;
@@ -2862,7 +2863,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->mapped_as_page = true;
- tx_buffer_info->next_to_watch = i;
len -= size;
total -= size;
@@ -2881,8 +2881,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
else
i = i - 1;
tx_ring->tx_buffer_info[i].skb = skb;
- tx_ring->tx_buffer_info[first].next_to_watch = i;
- tx_ring->tx_buffer_info[first].time_stamp = jiffies;
return count;
@@ -2891,7 +2889,6 @@ dma_error:
/* clear timestamp and dma mappings for failed tx_buffer_info map */
tx_buffer_info->dma = 0;
- tx_buffer_info->next_to_watch = 0;
count--;
/* clear timestamp and dma mappings for remaining portion of packet */
@@ -2908,7 +2905,8 @@ dma_error:
}
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
- int count, u32 paylen, u8 hdr_len)
+ int count, unsigned int first, u32 paylen,
+ u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbevf_tx_buffer *tx_buffer_info;
@@ -2959,6 +2957,16 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+ tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
}
@@ -3050,15 +3058,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_flags |= IXGBE_TX_FLAGS_CSUM;
ixgbevf_tx_queue(tx_ring, tx_flags,
- ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
- skb->len, hdr_len);
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
+ ixgbevf_tx_map(tx_ring, skb, tx_flags),
+ first, skb->len, hdr_len);
writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
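The ixgbevf rework above turns next_to_watch into a descriptor pointer and moves the wmb() so every descriptor write completes before the pointer is published; the clean path then treats NULL as "nothing pending" and clears the pointer once DD is set. A single-threaded user-space sketch of that publish/consume protocol, modelling the barrier with a release/acquire pair (GCC builtins); the types are toy versions of the ring structures.

#include <stdio.h>

struct tx_desc { unsigned int status; };
#define STAT_DD 0x1u

struct tx_buffer {
        struct tx_desc *next_to_watch;  /* NULL means nothing pending */
};

static void tx_publish(struct tx_buffer *buf, struct tx_desc *eop)
{
        /* release store stands in for the kernel's wmb() + plain store:
         * all descriptor writes are visible before the pointer is */
        __atomic_store_n(&buf->next_to_watch, eop, __ATOMIC_RELEASE);
}

static int tx_clean(struct tx_buffer *buf)
{
        struct tx_desc *eop =
                __atomic_load_n(&buf->next_to_watch, __ATOMIC_ACQUIRE);

        if (!eop)
                return 0;               /* no work pending */
        if (!(eop->status & STAT_DD))
                return 0;               /* hardware not done yet */
        buf->next_to_watch = NULL;      /* clear to prevent false hangs */
        return 1;
}

int main(void)
{
        struct tx_desc desc = { .status = STAT_DD };
        struct tx_buffer buf = { .next_to_watch = NULL };

        printf("cleaned %d\n", tx_clean(&buf)); /* 0: nothing queued */
        tx_publish(&buf, &desc);
        printf("cleaned %d\n", tx_clean(&buf)); /* 1: descriptor done */
        return 0;
}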
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 6562c736a1d..d1ecf4bf7da 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -20,6 +20,8 @@
* Copyright (C) 2007-2008 Marvell Semiconductor
* Lennert Buytenhek <buytenh@marvell.com>
*
+ * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
@@ -1523,6 +1525,34 @@ mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
return 0;
}
+static void
+mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ wol->supported = 0;
+ wol->wolopts = 0;
+ if (mp->phy)
+ phy_ethtool_get_wol(mp->phy, wol);
+}
+
+static int
+mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int err;
+
+ if (mp->phy == NULL)
+ return -EOPNOTSUPP;
+
+ err = phy_ethtool_set_wol(mp->phy, wol);
+ /* Given that mv643xx_eth works without the marvell-specific PHY driver,
+ * this debugging hint is useful to have.
+ */
+ if (err == -EOPNOTSUPP)
+ netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
+ return err;
+}
+
static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -1708,6 +1738,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
.get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
.get_sset_count = mv643xx_eth_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_wol = mv643xx_eth_get_wol,
+ .set_wol = mv643xx_eth_set_wol,
};
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index cd345b8969b..e48261e468f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1969,13 +1969,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
&rxq->descs_phys, GFP_KERNEL);
- if (rxq->descs == NULL) {
- netdev_err(pp->dev,
- "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
- rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
- rxq->size);
+ if (rxq->descs == NULL)
return -ENOMEM;
- }
BUG_ON(rxq->descs !=
PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
@@ -2029,13 +2024,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
&txq->descs_phys, GFP_KERNEL);
- if (txq->descs == NULL) {
- netdev_err(pp->dev,
- "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
- txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
- txq->size);
+ if (txq->descs == NULL)
return -ENOMEM;
- }
/* Make sure descriptor address is cache line size aligned */
BUG_ON(txq->descs !=
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 037ed866c22..339bb323cb0 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -584,12 +584,14 @@ static int init_hash_table(struct pxa168_eth_private *pep)
*/
if (pep->htpr == NULL) {
pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
- HASH_ADDR_TABLE_SIZE,
- &pep->htpr_dma, GFP_KERNEL);
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (pep->htpr == NULL)
return -ENOMEM;
+ } else {
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
}
- memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
wrl(pep, HTPR, pep->htpr_dma);
return 0;
}
@@ -1023,13 +1025,11 @@ static int rxq_init(struct net_device *dev)
size = pep->rx_ring_size * sizeof(struct rx_desc);
pep->rx_desc_area_size = size;
pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->rx_desc_dma, GFP_KERNEL);
- if (!pep->p_rx_desc_area) {
- printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
- dev->name, size);
+ &pep->rx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!pep->p_rx_desc_area)
goto out;
- }
- memset((void *)pep->p_rx_desc_area, 0, size);
+
/* initialize the next_desc_ptr links in the Rx descriptors ring */
p_rx_desc = pep->p_rx_desc_area;
for (i = 0; i < rx_desc_num; i++) {
@@ -1086,13 +1086,10 @@ static int txq_init(struct net_device *dev)
size = pep->tx_ring_size * sizeof(struct tx_desc);
pep->tx_desc_area_size = size;
pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->tx_desc_dma, GFP_KERNEL);
- if (!pep->p_tx_desc_area) {
- printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
- dev->name, size);
+ &pep->tx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!pep->p_tx_desc_area)
goto out;
- }
- memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
p_tx_desc = pep->p_tx_desc_area;
for (i = 0; i < tx_desc_num; i++) {
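The pxa168 hunks above replace an explicit memset() of freshly returned DMA memory with __GFP_ZERO, a conversion repeated in several drivers below (myri10ge, pch_gbe, pasemi, qlcnic). A minimal before/after sketch of the pattern:

	/* before: allocate, then zero by hand */
	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, size);

	/* after: ask the allocator for zeroed memory */
	buf = dma_alloc_coherent(dev, size, &dma_handle,
				 GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		return -ENOMEM;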
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index fdc5f23d8e9..05267d716e8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1837,10 +1837,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
&priv->mfunc.vhcr_dma,
GFP_KERNEL);
- if (!priv->mfunc.vhcr) {
- mlx4_err(dev, "Couldn't allocate VHCR.\n");
+ if (!priv->mfunc.vhcr)
goto err_hcr;
- }
}
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 995d4b6d5c1..47b99609655 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1931,79 +1931,6 @@ static int mlx4_en_set_features(struct net_device *netdev,
}
-static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 flags)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_dev *mdev = priv->mdev->dev;
- int err;
-
- if (!mlx4_is_mfunc(mdev))
- return -EOPNOTSUPP;
-
- /* Hardware does not support aging addresses, allow only
- * permanent addresses if ndm_state is given
- */
- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- en_info(priv, "Add FDB only supports static addresses\n");
- return -EINVAL;
- }
-
- if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
- err = dev_uc_add_excl(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_add_excl(dev, addr);
- else
- err = -EINVAL;
-
- /* Only return duplicate errors if NLM_F_EXCL is set */
- if (err == -EEXIST && !(flags & NLM_F_EXCL))
- err = 0;
-
- return err;
-}
-
-static int mlx4_en_fdb_del(struct ndmsg *ndm,
- struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_dev *mdev = priv->mdev->dev;
- int err;
-
- if (!mlx4_is_mfunc(mdev))
- return -EOPNOTSUPP;
-
- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- en_info(priv, "Del FDB only supports static addresses\n");
- return -EINVAL;
- }
-
- if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
- err = dev_uc_del(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_del(dev, addr);
- else
- err = -EINVAL;
-
- return err;
-}
-
-static int mlx4_en_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev, int idx)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_dev *mdev = priv->mdev->dev;
-
- if (mlx4_is_mfunc(mdev))
- idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
-
- return idx;
-}
-
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -2025,9 +1952,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
- .ndo_fdb_add = mlx4_en_fdb_add,
- .ndo_fdb_del = mlx4_en_fdb_del,
- .ndo_fdb_dump = mlx4_en_fdb_dump,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 3488c6d9e6b..2448f0d669e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -58,10 +58,9 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
/* build the pkt before xmit */
skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
- if (!skb) {
- en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
+ if (!skb)
return -ENOMEM;
- }
+
skb_reserve(skb, NET_IP_ALIGN);
ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index a343066f7b4..ddaf138ce0d 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -792,20 +792,35 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
frame_hdr = ks->frame_head_info;
while (ks->frame_cnt--) {
+ if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
+ frame_hdr->len >= RX_BUF_SIZE ||
+ frame_hdr->len <= 0)) {
+
+ /* discard an invalid packet */
+ ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
+ netdev->stats.rx_dropped++;
+ if (!(frame_hdr->sts & RXFSHR_RXFV))
+ netdev->stats.rx_frame_errors++;
+ else
+ netdev->stats.rx_length_errors++;
+ frame_hdr++;
+ continue;
+ }
+
skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
- if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
- (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
+ if (likely(skb)) {
skb_reserve(skb, 2);
/* read data block including CRC 4 bytes */
ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
- skb_put(skb, frame_hdr->len);
+ skb_put(skb, frame_hdr->len - 4);
skb->protocol = eth_type_trans(skb, netdev);
netif_rx(skb);
+ /* exclude CRC size */
+ netdev->stats.rx_bytes += frame_hdr->len - 4;
+ netdev->stats.rx_packets++;
} else {
- pr_err("%s: err:skb alloc\n", __func__);
ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
- if (skb)
- dev_kfree_skb_irq(skb);
+ netdev->stats.rx_dropped++;
}
frame_hdr++;
}
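The rewritten ks_rcv() validates the frame header before allocating an skb and excludes the trailing 4-byte FCS that the QMU transfer includes. A sketch of the length handling, written with ETH_FCS_LEN (4, from <linux/if_ether.h>) in place of the literal:

	/* frame_hdr->len covers the payload plus the trailing CRC */
	ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
	skb_put(skb, frame_hdr->len - ETH_FCS_LEN);	/* expose payload only */
	netdev->stats.rx_bytes += frame_hdr->len - ETH_FCS_LEN;
	netdev->stats.rx_packets++;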
@@ -877,6 +892,8 @@ static irqreturn_t ks_irq(int irq, void *pw)
ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
}
+ if (unlikely(status & IRQ_RXOI))
+ ks->netdev->stats.rx_over_errors++;
/* this should be the last in the IRQ handler */

ks_restore_cmd_reg(ks);
return IRQ_HANDLED;
@@ -1015,6 +1032,9 @@ static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
ks_write_qmu(ks, skb->data, skb->len);
+ /* add tx statistics */
+ netdev->stats.tx_bytes += skb->len;
+ netdev->stats.tx_packets++;
dev_kfree_skb(skb);
} else
retv = NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 4f9937e026e..d5ffdc8264e 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3592,10 +3592,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
&ss->rx_done.bus,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (ss->rx_done.entry == NULL)
goto abort;
- memset(ss->rx_done.entry, 0, bytes);
bytes = sizeof(*ss->fw_stats);
ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
&ss->fw_stats_bus,
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index b0b36154636..c20766c2f65 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -175,13 +175,13 @@ static int sonic_probe1(struct net_device *dev)
/* Allocate the entire chunk of memory for the descriptors.
Note that this cannot cross a 64K boundary. */
- if ((lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
- dev_name(lp->device));
+ lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr,
+ GFP_KERNEL);
+ if (lp->descriptors == NULL)
goto out;
- }
/* Now set up the pointers to point to the appropriate places */
lp->cda = lp->descriptors;
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 0ffde69c8d0..346a4e025c3 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -202,13 +202,13 @@ static int macsonic_init(struct net_device *dev)
/* Allocate the entire chunk of memory for the descriptors.
Note that this cannot cross a 64K boundary. */
- if ((lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
- dev_name(lp->device));
+ lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr,
+ GFP_KERNEL);
+ if (lp->descriptors == NULL)
return -ENOMEM;
- }
/* Now set up the pointers to point to the appropriate places */
lp->cda = lp->descriptors;
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 46795e40346..1bd419dbda6 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -424,7 +424,6 @@ static void sonic_rx(struct net_device *dev)
/* Malloc up new buffer. */
new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (new_skb == NULL) {
- printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
lp->stats.rx_dropped++;
break;
}
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 5e4748e855f..c2e0256fe3d 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -197,14 +197,12 @@ static int __init sonic_probe1(struct net_device *dev)
* We also allocate extra space for a pointer to allow freeing
* this structure later on (in xtsonic_cleanup_module()).
*/
- lp->descriptors =
- dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr, GFP_KERNEL);
-
+ lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr,
+ GFP_KERNEL);
if (lp->descriptors == NULL) {
- printk(KERN_ERR "%s: couldn't alloc DMA memory for "
- " descriptors.\n", dev_name(lp->device));
err = -ENOMEM;
goto out;
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bfd887382e1..3371ff41bb3 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -80,6 +80,7 @@
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/tcp.h>
+#include <net/checksum.h>
#include <asm/div64.h>
#include <asm/irq.h>
@@ -8337,16 +8338,13 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
struct iphdr *ip = lro->iph;
struct tcphdr *tcp = lro->tcph;
- __sum16 nchk;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
/* Update L3 header */
+ csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
ip->tot_len = htons(lro->total_len);
- ip->check = 0;
- nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
- ip->check = nchk;
/* Update L4 header */
tcp->ack_seq = lro->tcp_ack;
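csum_replace2() applies the RFC 1624 incremental update: only the changed 16-bit word (tot_len here) is folded into the existing checksum, instead of zeroing ip->check and recomputing ip_fast_csum() over the whole header. A self-contained userspace sketch of the arithmetic, assuming 16-bit words as stored in the header:

	#include <stdint.h>

	/* RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m') */
	static uint16_t csum_update16(uint16_t check, uint16_t old,
				      uint16_t new)
	{
		uint32_t sum = (uint16_t)~check + (uint16_t)~old + new;

		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

The same conversion appears again below in netxen_process_lro() and the two qlcnic LRO paths.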
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 63e7af44366..cb9e6383150 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -152,8 +152,6 @@ static void netx_eth_receive(struct net_device *ndev)
skb = netdev_alloc_skb(ndev, len);
if (unlikely(skb == NULL)) {
- printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
- ndev->name);
ndev->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 162da8975b0..3df8287b745 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -287,23 +287,16 @@ static int w90p910_init_desc(struct net_device *dev)
ether = netdev_priv(dev);
pdev = ether->pdev;
- ether->tdesc = (struct tran_pdesc *)
- dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
- &ether->tdesc_phys, GFP_KERNEL);
-
- if (!ether->tdesc) {
- dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
+ ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
+ &ether->tdesc_phys, GFP_KERNEL);
+ if (!ether->tdesc)
return -ENOMEM;
- }
-
- ether->rdesc = (struct recv_pdesc *)
- dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
- &ether->rdesc_phys, GFP_KERNEL);
+ ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
+ &ether->rdesc_phys, GFP_KERNEL);
if (!ether->rdesc) {
- dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
- ether->tdesc, ether->tdesc_phys);
+ ether->tdesc, ether->tdesc_phys);
return -ENOMEM;
}
@@ -737,7 +730,6 @@ static void netdev_rx(struct net_device *dev)
data = ether->rdesc->recv_buf[ether->cur_rx];
skb = netdev_alloc_skb(dev, length + 2);
if (!skb) {
- dev_err(&pdev->dev, "get skb buffer error\n");
ether->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 0b8de12bcbc..b62262cfe4d 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5025,7 +5025,6 @@ static int nv_loopback_test(struct net_device *dev)
pkt_len = ETH_DATA_LEN;
tx_skb = netdev_alloc_skb(dev, pkt_len);
if (!tx_skb) {
- netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n");
ret = 0;
goto out;
}
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index efa29b712d5..89d1b0eadf3 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1409,9 +1409,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
dma_alloc_coherent(&pldat->pdev->dev,
pldat->dma_buff_size, &dma_handle,
GFP_KERNEL);
-
if (pldat->dma_buff_base_v == NULL) {
- dev_err(&pdev->dev, "error getting DMA region.\n");
ret = -ENOMEM;
goto err_out_free_irq;
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 39ab4d09faa..abd5fba09b8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1469,13 +1469,11 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
- &rx_ring->rx_buff_pool_logic,
- GFP_KERNEL);
- if (!rx_ring->rx_buff_pool) {
- pr_err("Unable to allocate memory for the receive pool buffer\n");
+ &rx_ring->rx_buff_pool_logic,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!rx_ring->rx_buff_pool)
return -ENOMEM;
- }
- memset(rx_ring->rx_buff_pool, 0, size);
+
rx_ring->rx_buff_pool_size = size;
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
@@ -1774,13 +1772,12 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!tx_ring->desc) {
vfree(tx_ring->buffer_info);
- pr_err("Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
- memset(tx_ring->desc, 0, tx_ring->size);
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -1820,14 +1817,12 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
-
+ &rx_ring->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!rx_ring->desc) {
- pr_err("Unable to allocate memory for the receive descriptor ring\n");
vfree(rx_ring->buffer_info);
return -ENOMEM;
}
- memset(rx_ring->desc, 0, rx_ring->size);
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
for (desNo = 0; desNo < rx_ring->count; desNo++) {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index b1cfbb75ff1..a5f0b5da614 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -441,12 +441,11 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
RX_RING_SIZE * sizeof(u64),
- &ring->buf_dma, GFP_KERNEL);
+ &ring->buf_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!ring->buffers)
goto out_ring_desc;
- memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
-
write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index eb3dfdbb642..322a36b7672 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -955,9 +955,10 @@ typedef struct nx_mac_list_s {
uint8_t mac_addr[ETH_ALEN+2];
} nx_mac_list_t;
-struct nx_vlan_ip_list {
+struct nx_ip_list {
struct list_head list;
__be32 ip_addr;
+ bool master;
};
/*
@@ -1605,7 +1606,7 @@ struct netxen_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct list_head mac_list;
- struct list_head vlan_ip_list;
+ struct list_head ip_list;
spinlock_t tx_clean_lock;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 4782dcfde73..7692dfd4f26 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
+#include <net/checksum.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
@@ -1641,9 +1642,8 @@ netxen_process_lro(struct netxen_adapter *adapter,
th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
th->psh = push;
th->seq = htonl(seq_number);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 501f49207da..7867aebc05f 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -90,7 +90,7 @@ static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
static irqreturn_t netxen_msix_intr(int irq, void *data);
-static void netxen_free_vlan_ip_list(struct netxen_adapter *);
+static void netxen_free_ip_list(struct netxen_adapter *, bool);
static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats);
@@ -1450,7 +1450,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
- INIT_LIST_HEAD(&adapter->vlan_ip_list);
+ INIT_LIST_HEAD(&adapter->ip_list);
err = netxen_setup_pci_map(adapter);
if (err)
@@ -1585,7 +1585,7 @@ static void netxen_nic_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->tx_timeout_task);
- netxen_free_vlan_ip_list(adapter);
+ netxen_free_ip_list(adapter, false);
netxen_nic_detach(adapter);
nx_decr_dev_ref_cnt(adapter);
@@ -3137,62 +3137,77 @@ netxen_destip_supported(struct netxen_adapter *adapter)
}
static void
-netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{
- struct nx_vlan_ip_list *cur;
- struct list_head *head = &adapter->vlan_ip_list;
+ struct nx_ip_list *cur, *tmp_cur;
- while (!list_empty(head)) {
- cur = list_entry(head->next, struct nx_vlan_ip_list, list);
- netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
- list_del(&cur->list);
- kfree(cur);
+ list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
+ if (master) {
+ if (cur->master) {
+ netxen_config_ipaddr(adapter, cur->ip_addr,
+ NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+ } else {
+ netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
}
-
}
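netxen_free_ip_list() walks the list with list_for_each_entry_safe() because entries are unlinked and freed mid-iteration; the plain iterator would dereference freed memory. A minimal sketch of the idiom, with want_removed() as a hypothetical predicate:

	struct nx_ip_list *cur, *tmp;

	/* tmp caches the next node, so cur may be freed safely */
	list_for_each_entry_safe(cur, tmp, &adapter->ip_list, list) {
		if (!want_removed(cur))		/* hypothetical filter */
			continue;
		list_del(&cur->list);
		kfree(cur);
	}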
-static void
-netxen_list_config_vlan_ip(struct netxen_adapter *adapter,
+
+static bool
+netxen_list_config_ip(struct netxen_adapter *adapter,
struct in_ifaddr *ifa, unsigned long event)
{
struct net_device *dev;
- struct nx_vlan_ip_list *cur, *tmp_cur;
+ struct nx_ip_list *cur, *tmp_cur;
struct list_head *head;
+ bool ret = false;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
if (dev == NULL)
- return;
-
- if (!is_vlan_dev(dev))
- return;
+ goto out;
switch (event) {
case NX_IP_UP:
- list_for_each(head, &adapter->vlan_ip_list) {
- cur = list_entry(head, struct nx_vlan_ip_list, list);
+ list_for_each(head, &adapter->ip_list) {
+ cur = list_entry(head, struct nx_ip_list, list);
if (cur->ip_addr == ifa->ifa_address)
- return;
+ goto out;
}
- cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC);
+ cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
if (cur == NULL)
- return;
-
+ goto out;
+ if (dev->priv_flags & IFF_802_1Q_VLAN)
+ dev = vlan_dev_real_dev(dev);
+ cur->master = !!netif_is_bond_master(dev);
cur->ip_addr = ifa->ifa_address;
- list_add_tail(&cur->list, &adapter->vlan_ip_list);
+ list_add_tail(&cur->list, &adapter->ip_list);
+ netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
+ ret = true;
break;
case NX_IP_DOWN:
list_for_each_entry_safe(cur, tmp_cur,
- &adapter->vlan_ip_list, list) {
+ &adapter->ip_list, list) {
if (cur->ip_addr == ifa->ifa_address) {
list_del(&cur->list);
kfree(cur);
+ netxen_config_ipaddr(adapter, ifa->ifa_address,
+ NX_IP_DOWN);
+ ret = true;
break;
}
}
}
+out:
+ return ret;
}
+
static void
netxen_config_indev_addr(struct netxen_adapter *adapter,
struct net_device *dev, unsigned long event)
@@ -3209,14 +3224,10 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
for_ifa(indev) {
switch (event) {
case NETDEV_UP:
- netxen_config_ipaddr(adapter,
- ifa->ifa_address, NX_IP_UP);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
+ netxen_list_config_ip(adapter, ifa, NX_IP_UP);
break;
case NETDEV_DOWN:
- netxen_config_ipaddr(adapter,
- ifa->ifa_address, NX_IP_DOWN);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
+ netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
break;
default:
break;
@@ -3231,23 +3242,78 @@ netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct nx_vlan_ip_list *pos, *tmp_pos;
+ struct nx_ip_list *pos, *tmp_pos;
unsigned long ip_event;
ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
netxen_config_indev_addr(adapter, netdev, event);
- list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) {
+ list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
}
}
+static inline bool
+netxen_config_checkdev(struct net_device *dev)
+{
+ struct netxen_adapter *adapter;
+
+ if (!is_netxen_netdev(dev))
+ return false;
+ adapter = netdev_priv(dev);
+ if (!adapter)
+ return false;
+ if (!netxen_destip_supported(adapter))
+ return false;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return false;
+
+ return true;
+}
+
+/**
+ * netxen_config_master - configure addresses based on master
+ * @dev: netxen device
+ * @event: netdev event
+ */
+static void netxen_config_master(struct net_device *dev, unsigned long event)
+{
+ struct net_device *master, *slave;
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ rcu_read_lock();
+ master = netdev_master_upper_dev_get_rcu(dev);
+ /*
+ * This is the case where the netxen nic is being
+ * enslaved and is dev_open()ed in bond_enslave()
+ * Now we should program the bond's (and its vlans')
+ * addresses in the netxen NIC.
+ */
+ if (master && netif_is_bond_master(master) &&
+ !netif_is_bond_slave(dev)) {
+ netxen_config_indev_addr(adapter, master, event);
+ for_each_netdev_rcu(&init_net, slave)
+ if (slave->priv_flags & IFF_802_1Q_VLAN &&
+ vlan_dev_real_dev(slave) == master)
+ netxen_config_indev_addr(adapter, slave, event);
+ }
+ rcu_read_unlock();
+ /*
+ * This is the case where the netxen nic is being
+ * released and is dev_close()ed in bond_release()
+ * just before IFF_BONDING is stripped.
+ */
+ if (!master && dev->priv_flags & IFF_BONDING)
+ netxen_free_ip_list(adapter, true);
+}
+
static int netxen_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
struct net_device *dev = (struct net_device *)ptr;
struct net_device *orig_dev = dev;
+ struct net_device *slave;
recheck:
if (dev == NULL)
@@ -3257,19 +3323,28 @@ recheck:
dev = vlan_dev_real_dev(dev);
goto recheck;
}
-
- if (!is_netxen_netdev(dev))
- goto done;
-
- adapter = netdev_priv(dev);
-
- if (!adapter)
- goto done;
-
- if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
- goto done;
-
- netxen_config_indev_addr(adapter, orig_dev, event);
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+ /* If this is a bonding device, look for netxen-based slaves */
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_config_indev_addr(adapter,
+ orig_dev, event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ /* Act only if the actual netxen is the target */
+ if (orig_dev == dev)
+ netxen_config_master(dev, event);
+ netxen_config_indev_addr(adapter, orig_dev, event);
+ }
+ }
done:
return NOTIFY_DONE;
}
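The reworked notifier above iterates a bond's slaves to find netxen-owned devices. A sketch of the locking requirement inside the NETDEV_UP/NETDEV_DOWN path: for_each_netdev_in_bond_rcu() walks RCU-protected upper/lower lists, so the walk must sit inside rcu_read_lock()/rcu_read_unlock(), and nothing in the loop may sleep:

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(bond_dev, slave) {
		if (!netxen_config_checkdev(slave))
			continue;
		adapter = netdev_priv(slave);
		/* atomic-context work only; no sleeping calls here */
		netxen_config_indev_addr(adapter, bond_dev, event);
	}
	rcu_read_unlock();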
@@ -3279,12 +3354,12 @@ netxen_inetaddr_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
- struct net_device *dev;
-
+ struct net_device *dev, *slave;
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ unsigned long ip_event;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
-
+ ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
recheck:
if (dev == NULL)
goto done;
@@ -3293,31 +3368,24 @@ recheck:
dev = vlan_dev_real_dev(dev);
goto recheck;
}
-
- if (!is_netxen_netdev(dev))
- goto done;
-
- adapter = netdev_priv(dev);
-
- if (!adapter || !netxen_destip_supported(adapter))
- goto done;
-
- if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
- goto done;
-
- switch (event) {
- case NETDEV_UP:
- netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
- break;
- case NETDEV_DOWN:
- netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
- break;
- default:
- break;
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+ /* If this is a bonding device, look for netxen-based slaves */
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
}
-
done:
return NOTIFY_DONE;
}
@@ -3334,7 +3402,7 @@ static void
netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
static void
-netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{ }
#endif
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 8fd38cb6d26..91a8fcd6c24 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -312,7 +312,6 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
qdev->lrg_buffer_len);
if (unlikely(!lrg_buf_cb->skb)) {
- netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
qdev->lrg_buf_skb_check++;
} else {
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index ba3c72fce1f..15777995588 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 1
-#define _QLCNIC_LINUX_SUBVERSION 35
-#define QLCNIC_LINUX_VERSIONID "5.1.35"
+#define _QLCNIC_LINUX_SUBVERSION 37
+#define QLCNIC_LINUX_VERSIONID "5.1.37"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index cd5ae8813cb..56c3676bdbf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -15,36 +15,57 @@
#define RSS_HASHTYPE_IP_TCP 0x3
/* status descriptor mailbox data
- * @phy_addr: physical address of buffer
+ * @phy_addr_{low|high}: physical address of buffer
* @sds_ring_size: buffer size
* @intrpt_id: interrupt id
* @intrpt_val: source of interrupt
*/
struct qlcnic_sds_mbx {
- u64 phy_addr;
- u8 rsvd1[16];
+ u32 phy_addr_low;
+ u32 phy_addr_high;
+ u32 rsvd1[4];
+#if defined(__LITTLE_ENDIAN)
u16 sds_ring_size;
- u16 rsvd2[3];
+ u16 rsvd2;
+ u16 rsvd3[2];
u16 intrpt_id;
u8 intrpt_val;
- u8 rsvd3[5];
+ u8 rsvd4;
+#elif defined(__BIG_ENDIAN)
+ u16 rsvd2;
+ u16 sds_ring_size;
+ u16 rsvd3[2];
+ u8 rsvd4;
+ u8 intrpt_val;
+ u16 intrpt_id;
+#endif
+ u32 rsvd5;
} __packed;
/* receive descriptor buffer data
- * phy_addr_reg: physical address of regular buffer
- * phy_addr_jmb: physical address of jumbo buffer
+ * phy_addr_reg_{low|high}: physical address of regular buffer
+ * phy_addr_jmb_{low|high}: physical address of jumbo buffer
* reg_ring_sz: size of regular buffer
* reg_ring_len: no. of entries in regular buffer
* jmb_ring_len: no. of entries in jumbo buffer
* jmb_ring_sz: size of jumbo buffer
*/
struct qlcnic_rds_mbx {
- u64 phy_addr_reg;
- u64 phy_addr_jmb;
+ u32 phy_addr_reg_low;
+ u32 phy_addr_reg_high;
+ u32 phy_addr_jmb_low;
+ u32 phy_addr_jmb_high;
+#if defined(__LITTLE_ENDIAN)
u16 reg_ring_sz;
u16 reg_ring_len;
u16 jmb_ring_sz;
u16 jmb_ring_len;
+#elif defined(__BIG_ENDIAN)
+ u16 reg_ring_len;
+ u16 reg_ring_sz;
+ u16 jmb_ring_len;
+ u16 jmb_ring_sz;
+#endif
} __packed;
/* host producers for regular and jumbo rings */
@@ -61,6 +82,7 @@ struct __host_producer_mbx {
* @phy_port: physical port id
*/
struct qlcnic_rcv_mbx_out {
+#if defined(__LITTLE_ENDIAN)
u8 rcv_num;
u8 sts_num;
u16 ctx_id;
@@ -68,32 +90,56 @@ struct qlcnic_rcv_mbx_out {
u8 num_pci_func;
u8 phy_port;
u8 vport_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+ u8 vport_id;
+ u8 phy_port;
+ u8 num_pci_func;
+ u8 state;
+#endif
u32 host_csmr[QLCNIC_MAX_RING_SETS];
struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
} __packed;
struct qlcnic_add_rings_mbx_out {
+#if defined(__LITTLE_ENDIAN)
u8 rcv_num;
u8 sts_num;
- u16 ctx_id;
+ u16 ctx_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+#endif
u32 host_csmr[QLCNIC_MAX_RING_SETS];
struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
} __packed;
/* Transmit context mailbox inbox registers
- * @phys_addr: DMA address of the transmit buffer
- * @cnsmr_index: host consumer index
+ * @phys_addr_{low|high}: DMA address of the transmit buffer
+ * @cnsmr_index_{low|high}: host consumer index
 * @size: length of transmit buffer ring
 * @intr_id: interrupt id
 * @src: source of interrupt
*/
struct qlcnic_tx_mbx {
- u64 phys_addr;
- u64 cnsmr_index;
+ u32 phys_addr_low;
+ u32 phys_addr_high;
+ u32 cnsmr_index_low;
+ u32 cnsmr_index_high;
+#if defined(__LITTLE_ENDIAN)
u16 size;
u16 intr_id;
u8 src;
u8 rsvd[3];
+#elif defined(__BIG_ENDIAN)
+ u16 intr_id;
+ u16 size;
+ u8 rsvd[3];
+ u8 src;
+#endif
} __packed;
/* Transmit context mailbox outbox registers
@@ -101,11 +147,18 @@ struct qlcnic_tx_mbx {
* @ctx_id: transmit context id
* @state: state of the transmit context
*/
+
struct qlcnic_tx_mbx_out {
u32 host_prod;
+#if defined(__LITTLE_ENDIAN)
u16 ctx_id;
u8 state;
u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 state;
+ u16 ctx_id;
+#endif
} __packed;
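The reworked mailbox structs above replace u64 fields with explicit low/high u32 pairs and give every multi-byte field an endian-specific layout, because the firmware expects a fixed little-endian register image regardless of host byte order. A condensed sketch of the pattern; qlcnic_example_mbx is hypothetical, and LSD()/MSD() yield the low and high 32 bits of a 64-bit value:

	struct qlcnic_example_mbx {	/* hypothetical, for shape only */
		u32 phy_addr_low;
		u32 phy_addr_high;
	#if defined(__LITTLE_ENDIAN)
		u16 ring_size;
		u16 intr_id;
	#elif defined(__BIG_ENDIAN)
		u16 intr_id;
		u16 ring_size;
	#endif
	} __packed;

	mbx.phy_addr_low  = LSD(ring->phys_addr);	/* bits 31:0  */
	mbx.phy_addr_high = MSD(ring->phys_addr);	/* bits 63:32 */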
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
@@ -1004,7 +1057,8 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds = &recv_ctx->sds_rings[i];
sds->consumer = 0;
memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
- sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
sds_mbx.sds_ring_size = sds->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -1090,7 +1144,8 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
sds = &recv_ctx->sds_rings[i];
sds->consumer = 0;
memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
- sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
sds_mbx.sds_ring_size = sds->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
intrpt_id = ahw->intr_tbl[i].id;
@@ -1110,13 +1165,15 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
rds = &recv_ctx->rds_rings[0];
rds->producer = 0;
memset(&rds_mbx, 0, rds_mbx_size);
- rds_mbx.phy_addr_reg = rds->phys_addr;
+ rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr);
rds_mbx.reg_ring_sz = rds->dma_size;
rds_mbx.reg_ring_len = rds->num_desc;
/* Jumbo ring */
rds = &recv_ctx->rds_rings[1];
rds->producer = 0;
- rds_mbx.phy_addr_jmb = rds->phys_addr;
+ rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr);
rds_mbx.jmb_ring_sz = rds->dma_size;
rds_mbx.jmb_ring_len = rds->num_desc;
buf = &cmd.req.arg[index];
@@ -1182,8 +1239,10 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
/* setup mailbox inbox registers */
- mbx.phys_addr = tx->phys_addr;
- mbx.cnsmr_index = tx->hw_cons_phys_addr;
+ mbx.phys_addr_low = LSD(tx->phys_addr);
+ mbx.phys_addr_high = MSD(tx->phys_addr);
+ mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr);
+ mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr);
mbx.size = tx->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id;
@@ -1373,6 +1432,51 @@ mbx_err:
}
}
+int qlcnic_83xx_set_led(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EIO, active = 1;
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(netdev,
+ "LED test is not supported in non-privileged mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to set LED blink state\n");
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ active = 0;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to reset LED blink state\n");
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!active || err)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return err;
+}
+
void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
int enable)
{
@@ -1713,7 +1817,12 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
(adapter->recv_ctx->context_id << 16);
mv.vlan = le16_to_cpu(vlan_id);
- memcpy(&mv.mac, addr, ETH_ALEN);
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
buf = &cmd.req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -2163,7 +2272,7 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter)
{
int ret;
u32 cmd;
@@ -2181,7 +2290,7 @@ static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
{
int ret;
@@ -2255,7 +2364,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
return -EIO;
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
- ret = qlcnic_83xx_enable_flash_write_op(adapter);
+ ret = qlcnic_83xx_enable_flash_write(adapter);
if (ret) {
qlcnic_83xx_unlock_flash(adapter);
dev_err(&adapter->pdev->dev,
@@ -2297,7 +2406,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
}
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
- ret = qlcnic_83xx_disable_flash_write_op(adapter);
+ ret = qlcnic_83xx_disable_flash_write(adapter);
if (ret) {
qlcnic_83xx_unlock_flash(adapter);
dev_err(&adapter->pdev->dev,
@@ -2337,8 +2446,8 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
u32 temp;
int ret = -EIO;
- if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) ||
- (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) {
+ if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
+ (count > QLC_83XX_FLASH_WRITE_MAX)) {
dev_err(&adapter->pdev->dev,
"%s: Invalid word count\n", __func__);
return -EIO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 61f81f6c84a..fbb3d1d9e55 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -12,6 +12,8 @@
#include <linux/etherdevice.h>
#include "qlcnic_hw.h"
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
+
/* Directly mapped registers */
#define QLC_83XX_CRB_WIN_BASE 0x3800
#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
@@ -94,8 +96,23 @@ struct qlcnic_intrpt_config {
};
struct qlcnic_macvlan_mbx {
- u8 mac[ETH_ALEN];
+#if defined(__LITTLE_ENDIAN)
+ u8 mac_addr0;
+ u8 mac_addr1;
+ u8 mac_addr2;
+ u8 mac_addr3;
+ u8 mac_addr4;
+ u8 mac_addr5;
+ u16 vlan;
+#elif defined(__BIG_ENDIAN)
+ u8 mac_addr3;
+ u8 mac_addr2;
+ u8 mac_addr1;
+ u8 mac_addr0;
u16 vlan;
+ u8 mac_addr5;
+ u8 mac_addr4;
+#endif
};
struct qlc_83xx_fw_info {
@@ -242,8 +259,8 @@ struct qlc_83xx_idc {
#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
#define QLC_83XX_FLASH_STATUS_READY 0x6
-#define QLC_83XX_FLASH_BULK_WRITE_MIN 2
-#define QLC_83XX_FLASH_BULK_WRITE_MAX 64
+#define QLC_83XX_FLASH_WRITE_MIN 2
+#define QLC_83XX_FLASH_WRITE_MAX 64
#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
#define QLC_83XX_ERASE_MODE 1
#define QLC_83XX_WRITE_MODE 2
@@ -434,5 +451,8 @@ int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
int qlcnic_83xx_loopback_test(struct net_device *, u8);
int qlcnic_83xx_interrupt_test(struct net_device *);
+int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 5c033f268ca..ba5ac69bf48 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -31,6 +31,7 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
/* Template header */
struct qlc_83xx_reset_hdr {
+#if defined(__LITTLE_ENDIAN)
u16 version;
u16 signature;
u16 size;
@@ -39,14 +40,31 @@ struct qlc_83xx_reset_hdr {
u16 checksum;
u16 init_offset;
u16 start_offset;
+#elif defined(__BIG_ENDIAN)
+ u16 signature;
+ u16 version;
+ u16 entries;
+ u16 size;
+ u16 checksum;
+ u16 hdr_size;
+ u16 start_offset;
+ u16 init_offset;
+#endif
} __packed;
/* Command entry header. */
struct qlc_83xx_entry_hdr {
- u16 cmd;
- u16 size;
- u16 count;
- u16 delay;
+#if defined(__LITTLE_ENDIAN)
+ u16 cmd;
+ u16 size;
+ u16 count;
+ u16 delay;
+#elif defined(__BIG_ENDIAN)
+ u16 size;
+ u16 cmd;
+ u16 delay;
+ u16 count;
+#endif
} __packed;
/* Generic poll command */
@@ -60,10 +78,17 @@ struct qlc_83xx_rmw {
u32 mask;
u32 xor_value;
u32 or_value;
+#if defined(__LITTLE_ENDIAN)
u8 shl;
u8 shr;
u8 index_a;
u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 index_a;
+ u8 shr;
+ u8 shl;
+#endif
} __packed;
/* Generic command with 2 DWORD */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index a69097c6b84..2d9c23fcec5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -422,22 +422,20 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &rq_phys_addr, GFP_KERNEL);
+ &rq_phys_addr, GFP_KERNEL | __GFP_ZERO);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
- &rsp_phys_addr, GFP_KERNEL);
+ &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
}
- memset(rq_addr, 0, rq_size);
prq = rq_addr;
- memset(rsp_addr, 0, rsp_size);
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
@@ -532,20 +530,15 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
&tx_ring->hw_cons_phys_addr,
GFP_KERNEL);
-
- if (ptr == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ if (ptr == NULL)
return -ENOMEM;
- }
+
tx_ring->hw_consumer = ptr;
/* cmd desc ring */
addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
&tx_ring->phys_addr,
GFP_KERNEL);
-
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate tx desc ring\n");
err = -ENOMEM;
goto err_out_free;
}
@@ -556,11 +549,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
- RCV_DESC_RINGSIZE(rds_ring),
- &rds_ring->phys_addr, GFP_KERNEL);
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate rds ring [%d]\n", ring);
err = -ENOMEM;
goto err_out_free;
}
@@ -572,11 +563,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
sds_ring = &recv_ctx->sds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
- STATUS_DESC_RINGSIZE(sds_ring),
- &sds_ring->phys_addr, GFP_KERNEL);
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate sds ring [%d]\n", ring);
err = -ENOMEM;
goto err_out_free;
}
@@ -753,10 +742,9 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
size_t nic_size = sizeof(struct qlcnic_info_le);
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
if (!nic_info_addr)
return -ENOMEM;
- memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
@@ -804,11 +792,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
return err;
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
if (!nic_info_addr)
return -ENOMEM;
- memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
nic_info->pci_func = cpu_to_le16(nic->pci_func);
@@ -854,10 +841,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
- &pci_info_dma_t, GFP_KERNEL);
+ &pci_info_dma_t,
+ GFP_KERNEL | __GFP_ZERO);
if (!pci_info_addr)
return -ENOMEM;
- memset(pci_info_addr, 0, pci_size);
npar = pci_info_addr;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
@@ -949,12 +936,9 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
}
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
- if (!stats_addr) {
- dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
+ &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ if (!stats_addr)
return -ENOMEM;
- }
- memset(stats_addr, 0, stats_size);
arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
arg1 |= rx_tx << 15 | stats_size << 16;
@@ -1003,13 +987,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
return -ENOMEM;
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
- if (!stats_addr) {
- dev_err(&adapter->pdev->dev,
- "%s: Unable to allocate memory.\n", __func__);
+ &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ if (!stats_addr)
return -ENOMEM;
- }
- memset(stats_addr, 0, stats_size);
+
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
cmd.req.arg[1] = stats_size << 16;
cmd.req.arg[2] = MSD(stats_dma_t);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5641f8ec49a..f4f279d5cba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -149,7 +149,8 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
static inline int qlcnic_82xx_statistics(void)
{
- return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
}
static inline int qlcnic_83xx_statistics(void)
@@ -1070,8 +1071,7 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static void
-qlcnic_fill_stats(u64 *data, void *stats, int type)
+static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
{
if (type == QLCNIC_MAC_STATS) {
struct qlcnic_mac_statistics *mac_stats =
@@ -1120,6 +1120,7 @@ qlcnic_fill_stats(u64 *data, void *stats, int type)
*data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
*data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
}
+ return data;
}
static void qlcnic_get_ethtool_stats(struct net_device *dev,
@@ -1147,7 +1148,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
/* Retrieve MAC statistics from firmware */
memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
qlcnic_get_mac_stats(adapter, &mac_stats);
- qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
+ data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
@@ -1159,7 +1160,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
if (ret)
return;
- qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
+ data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
if (ret)
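Returning the advanced cursor from qlcnic_fill_stats() is the substance of this fix: the old void version left the caller's data pointer where it started, so the MAC, rx, and tx blocks were all written over the same slots. A minimal sketch of the cursor-passing pattern, with fill_block() hypothetical:

	static u64 *fill_block(u64 *data, const u64 *src, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			*data++ = src[i];
		return data;	/* caller continues where we stopped */
	}

	data = fill_block(data, mac_stats, n_mac);
	data = fill_block(data, esw_rx, n_esw);	/* appends, no overwrite */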
@@ -1176,7 +1177,8 @@ static int qlcnic_set_led(struct net_device *dev,
int err = -EIO, active = 1;
if (qlcnic_83xx_check(adapter))
- return -EOPNOTSUPP;
+ return qlcnic_83xx_set_led(dev, state);
+
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(dev, "LED test not supported for non "
"privileged function\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0e630061bff..891f12d47c9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -9,6 +9,7 @@
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
+#include <net/checksum.h>
#include "qlcnic.h"
@@ -1132,9 +1133,8 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
iph = (struct iphdr *)skb->data;
th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
th->psh = push;
@@ -1595,9 +1595,8 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
iph = (struct iphdr *)skb->data;
th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
th->psh = push;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 28a6d483836..c6f9d5eb7d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -253,11 +253,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = -EOPNOTSUPP;
- if (!adapter->fdb_mac_learn) {
- pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
- __func__);
- return err;
- }
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
if (is_unicast_ether_addr(addr))
@@ -277,11 +274,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = 0;
- if (!adapter->fdb_mac_learn) {
- pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
- __func__);
- return -EOPNOTSUPP;
- }
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
pr_info("%s: FDB e-switch is not enabled\n", __func__);
@@ -306,11 +300,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if (!adapter->fdb_mac_learn) {
- pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
- __func__);
- return -EOPNOTSUPP;
- }
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
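Rather than rejecting FDB requests when driver-private MAC learning is off, the hunks above fall back to the core's default handlers. A sketch of the delegation; the foo_* names and the flag are hypothetical:

	static int foo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			       struct net_device *dev,
			       const unsigned char *addr, u16 flags)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (!priv->own_mac_learn)	/* hypothetical flag */
			return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);

		/* driver-specific learning path */
		return 0;
	}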
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index abbd22c814a..4b9bab18ebd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -810,11 +810,8 @@ static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
&tmp_addr_t, GFP_KERNEL);
- if (!tmp_addr) {
- dev_err(&adapter->pdev->dev,
- "Can't get memory for FW dump template\n");
+ if (!tmp_addr)
return -ENOMEM;
- }
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 987fb6f8adc..c77675da671 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -21,8 +21,6 @@
#include <linux/aer.h>
#include <linux/log2.h>
-#include <linux/sysfs.h>
-
#define QLC_STATUS_UNSUPPORTED_CMD -2
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
@@ -886,6 +884,244 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
return size;
}
+static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ unsigned char *p_read_buf;
+ int ret, count;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!size)
+ return QL_STATUS_INVALID_PARAM;
+ if (!buf)
+ return QL_STATUS_INVALID_PARAM;
+
+ count = size / sizeof(u32);
+
+ if (size % sizeof(u32))
+ count++;
+
+ p_read_buf = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_read_buf)
+ return -ENOMEM;
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_read_buf);
+ return -EIO;
+ }
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
+ count);
+
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ kfree(p_read_buf);
+ return ret;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+ memcpy(buf, p_read_buf, size);
+ kfree(p_read_buf);
+
+ return size;
+}
+
+static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+ count = size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
+ ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
+ (u32 *)p_src,
+ QLC_83XX_FLASH_WRITE_MAX);
+
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
+ offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset, size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+ count = size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ p_src = p_src + sizeof(u32);
+ offset = offset + sizeof(u32);
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int ret;
+ static int flash_mode;
+ unsigned long data;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!buf)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = kstrtoul(buf, 16, &data);
+
+ switch (data) {
+ case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
+ flash_mode = QLC_83XX_ERASE_MODE;
+ ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ break;
+
+ case QLC_83XX_FLASH_BULK_WRITE_CMD:
+ flash_mode = QLC_83XX_BULK_WRITE_MODE;
+ break;
+
+ case QLC_83XX_FLASH_WRITE_CMD:
+ flash_mode = QLC_83XX_WRITE_MODE;
+ break;
+ default:
+ if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+ }
+
+ if (flash_mode == QLC_83XX_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__,
+ __LINE__);
+ return -EIO;
+ }
+ }
+ }
+
+ return size;
+}
+
static struct device_attribute dev_attr_bridged_mode = {
.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
.show = qlcnic_show_bridged_mode,
@@ -960,6 +1196,13 @@ static struct bin_attribute bin_attr_pm_config = {
.write = qlcnic_sysfs_write_pm_config,
};
+static struct bin_attribute bin_attr_flash = {
+ .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_83xx_sysfs_flash_read_handler,
+ .write = qlcnic_83xx_sysfs_flash_write_handler,
+};
+
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
@@ -1048,10 +1291,18 @@ void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
{
+ struct device *dev = &adapter->pdev->dev;
+
qlcnic_create_diag_entries(adapter);
+
+ if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
+ dev_info(dev, "failed to create flash sysfs entry\n");
}
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
{
+ struct device *dev = &adapter->pdev->dev;
+
qlcnic_remove_diag_entries(adapter);
+ sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
}
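
For reference, the registration pattern behind the new "flash" node above is the standard sysfs binary-attribute API. A self-contained sketch with illustrative demo_* names (not qlcnic-specific):

/* Illustrative only: "demo_blob" and the demo_* names are hypothetical. */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_read(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *attr, char *buf,
			 loff_t off, size_t count)
{
	/* Fill 'buf' with up to 'count' bytes starting at 'off';
	 * return bytes produced, 0 for EOF, or a negative errno.
	 */
	return 0;
}

static struct bin_attribute demo_attr = {
	.attr	= { .name = "demo_blob", .mode = S_IRUGO },
	.size	= 0,		/* 0 = size unknown/unbounded */
	.read	= demo_read,
};

static int demo_add_sysfs(struct device *dev)
{
	return sysfs_create_bin_file(&dev->kobj, &demo_attr);
}

static void demo_remove_sysfs(struct device *dev)
{
	sysfs_remove_bin_file(&dev->kobj, &demo_attr);
}
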
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b13ab544a7e..1dd778a6f01 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1211,8 +1211,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
netdev_alloc_skb(qdev->ndev,
SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
- netif_err(qdev, probe, qdev->ndev,
- "Couldn't get an skb.\n");
rx_ring->sbq_clean_idx = clean_idx;
return;
}
@@ -1519,8 +1517,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get an skb, need to unwind!.\n");
rx_ring->rx_dropped++;
put_page(lbq_desc->p.pg_chunk.page);
return;
@@ -1605,8 +1601,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Allocate new_skb and copy */
new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
if (new_skb == NULL) {
- netif_err(qdev, probe, qdev->ndev,
- "No skb available, drop the packet.\n");
rx_ring->rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 5b4103db70f..e9dc84943cf 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -224,11 +224,14 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
break;
}
+ if (limit < 0)
+ return -ETIMEDOUT;
+
return ioread16(ioaddr + MMRD);
}
/* Write a word data from PHY Chip */
-static void r6040_phy_write(void __iomem *ioaddr,
+static int r6040_phy_write(void __iomem *ioaddr,
int phy_addr, int reg, u16 val)
{
int limit = MAC_DEF_TIMEOUT;
@@ -243,6 +246,8 @@ static void r6040_phy_write(void __iomem *ioaddr,
if (!(cmd & MDIO_WRITE))
break;
}
+
+ return (limit < 0) ? -ETIMEDOUT : 0;
}
static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
@@ -261,9 +266,7 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- r6040_phy_write(ioaddr, phy_addr, reg, value);
-
- return 0;
+ return r6040_phy_write(ioaddr, phy_addr, reg, value);
}
static int r6040_mdiobus_reset(struct mii_bus *bus)
@@ -347,7 +350,6 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
do {
skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!skb) {
- netdev_err(dev, "failed to alloc skb for rx\n");
rc = -ENOMEM;
goto err_exit;
}
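
The r6040 change turns silent MDIO timeouts into -ETIMEDOUT so the mii_bus core can see the failure instead of consuming stale register data. The polling idiom it fixes looks roughly like this (sketch; the MY_MDIO_* register names and timeout budget are made up):

#include <linux/errno.h>
#include <linux/io.h>

/* Poll a busy bit with a bounded loop and report exhaustion to the
 * caller instead of silently returning.
 */
static int my_phy_wait_idle(void __iomem *ioaddr)
{
	int limit = 2048;	/* assumed timeout budget */
	u16 cmd;

	while (limit--) {
		cmd = ioread16(ioaddr + MY_MDIO_CTRL);
		if (!(cmd & MY_MDIO_BUSY))
			return 0;
	}
	return -ETIMEDOUT;	/* budget exhausted: propagate the error */
}
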
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 1276ac71353..3ccedeb8aba 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2041,8 +2041,6 @@ keep_pkt:
netif_receive_skb (skb);
} else {
- if (net_ratelimit())
- netdev_warn(dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
}
received++;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 9f2d416de75..d77d60ea820 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -782,8 +782,6 @@ static void net_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
- dev->name);
dev->stats.rx_dropped++;
goto done;
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index bf5e3cf97c4..56884c4cd90 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -908,11 +908,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
/* Allocate all Rx descriptors. */
rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
- GFP_KERNEL);
-
+ GFP_KERNEL);
if (!mdp->rx_ring) {
- dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
- rx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
@@ -922,10 +919,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
/* Allocate all Tx descriptors. */
tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!mdp->tx_ring) {
- dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
- tx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 3aca57853ed..bdac936a68b 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -651,8 +651,11 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
received ++;
- } else
- goto dropping;
+ } else {
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ dev->stats.rx_dropped++;
+ goto done;
+ }
} else {
struct net_device_stats *stats = &dev->stats;
ether3_outw(next_ptr >> 8, REG_RECVEND);
@@ -679,21 +682,6 @@ done:
}
return maxcnt;
-
-dropping:{
- static unsigned long last_warned;
-
- ether3_outw(next_ptr >> 8, REG_RECVEND);
- /*
- * Don't print this message too many times...
- */
- if (time_after(jiffies, last_warned + 10 * HZ)) {
- last_warned = jiffies;
- printk("%s: memory squeeze, dropping packet.\n", dev->name);
- }
- dev->stats.rx_dropped++;
- goto done;
- }
}
/*
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 0fde9ca2826..0ad5694b41f 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -381,8 +381,6 @@ memory_squeeze:
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
} else {
- printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
}
} else {
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0bc00991d31..01b99206139 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -22,6 +22,7 @@
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
+#include <linux/aer.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
@@ -71,21 +72,21 @@ const char *const efx_loopback_mode_names[] = {
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
- [RESET_TYPE_INVISIBLE] = "INVISIBLE",
- [RESET_TYPE_ALL] = "ALL",
- [RESET_TYPE_WORLD] = "WORLD",
- [RESET_TYPE_DISABLE] = "DISABLE",
- [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
- [RESET_TYPE_INT_ERROR] = "INT_ERROR",
- [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
- [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
- [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
- [RESET_TYPE_TX_SKIP] = "TX_SKIP",
- [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
+ [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
+ [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
-#define EFX_MAX_MTU (9 * 1024)
-
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -117,9 +118,12 @@ MODULE_PARM_DESC(separate_tx_channels,
static int napi_weight = 64;
/* This is the time (in jiffies) between invocations of the hardware
- * monitor. On Falcon-based NICs, this will:
+ * monitor.
+ * On Falcon-based NICs, this will:
* - Check the on-board hardware monitor;
* - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for power systems with EEH support, this will give EEH a
+ * chance to start.
*/
static unsigned int efx_monitor_interval = 1 * HZ;
@@ -203,13 +207,14 @@ static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
(efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \
} while (0)
static int efx_check_disabled(struct efx_nic *efx)
{
- if (efx->state == STATE_DISABLED) {
+ if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n");
return -EIO;
@@ -242,15 +247,9 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
struct efx_rx_queue *rx_queue =
efx_channel_get_rx_queue(channel);
- /* Deliver last RX packet. */
- if (channel->rx_pkt) {
- __efx_rx_packet(channel, channel->rx_pkt);
- channel->rx_pkt = NULL;
- }
- if (rx_queue->enabled) {
- efx_rx_strategy(channel);
+ efx_rx_flush_packet(channel);
+ if (rx_queue->enabled)
efx_fast_push_rx_descriptors(rx_queue);
- }
}
return spent;
@@ -625,20 +624,51 @@ fail:
*/
static void efx_start_datapath(struct efx_nic *efx)
{
+ bool old_rx_scatter = efx->rx_scatter;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
+ size_t rx_buf_len;
/* Calculate the rx buffer allocation parameters required to
* support the current MTU, including padding for header
* alignment and overruns.
*/
- efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
- efx->type->rx_buffer_hash_size +
- efx->type->rx_buffer_padding);
- efx->rx_buffer_order = get_order(efx->rx_buffer_len +
- sizeof(struct efx_rx_page_state));
+ efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) +
+ EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = 0;
+ } else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
+ EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
+ PAGE_SIZE / 2);
+ efx->rx_scatter = true;
+ efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
+ efx->rx_buffer_order = 0;
+ } else {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = get_order(rx_buf_len);
+ }
+
+ efx_rx_config_page_split(efx);
+ if (efx->rx_buffer_order)
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u; page order=%u batch=%u\n",
+ efx->rx_dma_len, efx->rx_buffer_order,
+ efx->rx_pages_per_batch);
+ else
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+ efx->rx_dma_len, efx->rx_page_buf_step,
+ efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+ /* RX filters also have scatter-enabled flags */
+ if (efx->rx_scatter != old_rx_scatter)
+ efx_filter_update_rx_scatter(efx);
/* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly
@@ -655,16 +685,12 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue(tx_queue);
- /* The rx buffer allocation strategy is MTU dependent */
- efx_rx_strategy(channel);
-
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
efx_nic_generate_fill_event(rx_queue);
}
- WARN_ON(channel->rx_pkt != NULL);
- efx_rx_strategy(channel);
+ WARN_ON(channel->rx_pkt_n_frags);
}
if (netif_device_present(efx->net_dev))
@@ -683,7 +709,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
BUG_ON(efx->port_enabled);
/* Only perform flush if dma is enabled */
- if (dev->is_busmaster) {
+ if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
rc = efx_nic_flush_queues(efx);
if (rc && EFX_WORKAROUND_7803(efx)) {
@@ -1596,13 +1622,15 @@ static void efx_start_all(struct efx_nic *efx)
efx_start_port(efx);
efx_start_datapath(efx);
- /* Start the hardware monitor if there is one. Otherwise (we're link
- * event driven), we have to poll the PHY because after an event queue
- * flush, we could have a missed a link state change */
- if (efx->type->monitor != NULL) {
+ /* Start the hardware monitor if there is one */
+ if (efx->type->monitor != NULL)
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
- } else {
+
+ /* If link state detection is normally event-driven, we have
+ * to poll now because we could have missed a change
+ */
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
mutex_lock(&efx->mac_lock);
if (efx->phy_op->poll(efx))
efx_link_status_changed(efx);
@@ -2309,7 +2337,9 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
out:
/* Leave device stopped if necessary */
- disabled = rc || method == RESET_TYPE_DISABLE;
+ disabled = rc ||
+ method == RESET_TYPE_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_DISABLE;
rc2 = efx_reset_up(efx, method, !disabled);
if (rc2) {
disabled = true;
@@ -2328,13 +2358,48 @@ out:
return rc;
}
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+static int efx_try_recovery(struct efx_nic *efx)
+{
+#ifdef CONFIG_EEH
+ /* A PCI error can occur and not be seen by EEH because nothing
+ * happens on the PCI bus. In this case the driver may fail and
+ * schedule a 'recover or reset', leading to this recovery handler.
+ * Manually call the eeh failure check function.
+ */
+ struct eeh_dev *eehdev =
+ of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
+
+ if (eeh_dev_check_failure(eehdev)) {
+ /* The EEH mechanisms will handle the error and reset the
+ * device if necessary.
+ */
+ return 1;
+ }
+#endif
+ return 0;
+}
+
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
static void efx_reset_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
- unsigned long pending = ACCESS_ONCE(efx->reset_pending);
+ unsigned long pending;
+ enum reset_type method;
+
+ pending = ACCESS_ONCE(efx->reset_pending);
+ method = fls(pending) - 1;
+
+ if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_ALL) &&
+ efx_try_recovery(efx))
+ return;
if (!pending)
return;
@@ -2346,7 +2411,7 @@ static void efx_reset_work(struct work_struct *data)
* it cannot change again.
*/
if (efx->state == STATE_READY)
- (void)efx_reset(efx, fls(pending) - 1);
+ (void)efx_reset(efx, method);
rtnl_unlock();
}
@@ -2355,11 +2420,20 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
+ if (efx->state == STATE_RECOVERY) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "recovering: skip scheduling %s reset\n",
+ RESET_TYPE(type));
+ return;
+ }
+
switch (type) {
case RESET_TYPE_INVISIBLE:
case RESET_TYPE_ALL:
+ case RESET_TYPE_RECOVER_OR_ALL:
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
+ case RESET_TYPE_RECOVER_OR_DISABLE:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
@@ -2569,6 +2643,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_fini_struct(efx);
pci_set_drvdata(pci_dev, NULL);
free_netdev(efx->net_dev);
+
+ pci_disable_pcie_error_reporting(pci_dev);
};
/* NIC VPD information
@@ -2741,6 +2817,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
+ rc = pci_enable_pcie_error_reporting(pci_dev);
+ if (rc && rc != -EINVAL)
+ netif_warn(efx, probe, efx->net_dev,
+ "pci_enable_pcie_error_reporting failed (%d)\n", rc);
+
return 0;
fail4:
@@ -2865,12 +2946,112 @@ static const struct dev_pm_ops efx_pm_ops = {
.restore = efx_pm_resume,
};
+/* A PCI error affecting this device was detected.
+ * At this point MMIO and DMA may be disabled.
+ * Stop the software path and request a slot reset.
+ */
+static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_RECOVERY;
+ efx->reset_pending = 0;
+
+ efx_device_detach_sync(efx);
+
+ efx_stop_all(efx);
+ efx_stop_interrupts(efx, false);
+
+ status = PCI_ERS_RESULT_NEED_RESET;
+ } else {
+ /* If the interface is disabled we don't want to do anything
+ * with it.
+ */
+ status = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ rtnl_unlock();
+
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+/* Fake a successful reset, which will be performed later in efx_io_resume. */

+static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ int rc;
+
+ if (pci_enable_device(pdev)) {
+ netif_err(efx, hw, efx->net_dev,
+ "Cannot re-enable PCI device after reset.\n");
+ status = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ rc = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
+ /* Non-fatal error. Continue. */
+ }
+
+ return status;
+}
+
+/* Perform the actual reset and resume I/O operations. */
+static void efx_io_resume(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ int rc;
+
+ rtnl_lock();
+
+ if (efx->state == STATE_DISABLED)
+ goto out;
+
+ rc = efx_reset(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "efx_reset failed after PCI error (%d)\n", rc);
+ } else {
+ efx->state = STATE_READY;
+ netif_dbg(efx, hw, efx->net_dev,
+ "Done resetting and resuming IO after PCI error.\n");
+ }
+
+out:
+ rtnl_unlock();
+}
+
+/* For simplicity and reliability, we always require a slot reset and try to
+ * reset the hardware when a pci error affecting the device is detected.
+ * We leave both the link_reset and mmio_enabled callback unimplemented:
+ * with our request for slot reset the mmio_enabled callback will never be
+ * called, and the link_reset callback is not used by AER or EEH mechanisms.
+ */
+static struct pci_error_handlers efx_err_handlers = {
+ .error_detected = efx_io_error_detected,
+ .slot_reset = efx_io_slot_reset,
+ .resume = efx_io_resume,
+};
+
static struct pci_driver efx_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = efx_pci_table,
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
+ .err_handler = &efx_err_handlers,
};
/**************************************************************************
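
The efx.c additions above implement the PCI AER/EEH recovery contract: error_detected() quiesces the device and asks the core for a slot reset, slot_reset() re-enables the device, and resume() restarts it. Stripped to its skeleton (hypothetical my_* driver, real pci_error_handlers API of this kernel era):

#include <linux/pci.h>

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
					  enum pci_channel_state state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* quiesce: detach netdev, stop traffic, disable interrupts ... */
	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;	/* request a slot reset */
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_resume(struct pci_dev *pdev)
{
	/* full device reset and datapath restart go here */
}

static struct pci_error_handlers my_err_handlers = {
	.error_detected	= my_error_detected,
	.slot_reset	= my_slot_reset,
	.resume		= my_resume,
};
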
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index d2f790df6dc..8372da239b4 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -33,17 +33,22 @@ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
/* RX */
+extern void efx_rx_config_page_split(struct efx_nic *efx);
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_rx_strategy(struct efx_channel *channel);
extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+extern void __efx_rx_packet(struct efx_channel *channel);
+extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
+ unsigned int index, unsigned int n_frags,
unsigned int len, u16 flags);
+static inline void efx_rx_flush_packet(struct efx_channel *channel)
+{
+ if (channel->rx_pkt_n_frags)
+ __efx_rx_packet(channel);
+}
extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -67,6 +72,7 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
extern int efx_probe_filters(struct efx_nic *efx);
extern void efx_restore_filters(struct efx_nic *efx);
extern void efx_remove_filters(struct efx_nic *efx);
+extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
extern s32 efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace);
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 182dbe2cc6e..ab8fb5889e5 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -137,8 +137,12 @@ enum efx_loopback_mode {
* Reset methods are numbered in order of increasing scope.
*
* @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
+ * if unsuccessful.
* @RESET_TYPE_ALL: Reset datapath, MAC and PHY
* @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
+ * unsuccessful.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
@@ -150,9 +154,11 @@ enum efx_loopback_mode {
*/
enum reset_type {
RESET_TYPE_INVISIBLE = 0,
- RESET_TYPE_ALL = 1,
- RESET_TYPE_WORLD = 2,
- RESET_TYPE_DISABLE = 3,
+ RESET_TYPE_RECOVER_OR_ALL = 1,
+ RESET_TYPE_ALL = 2,
+ RESET_TYPE_WORLD = 3,
+ RESET_TYPE_RECOVER_OR_DISABLE = 4,
+ RESET_TYPE_DISABLE = 5,
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
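
The renumbering preserves the documented invariant that reset methods increase in scope, which is what lets efx_reset_work() above reduce a bitmask of pending requests to a single method with fls(). A worked sketch of that selection:

#include <linux/bitops.h>

/* Bit position == enum value, and the enum is ordered by increasing
 * scope, so the highest set bit is the widest pending request.
 */
static enum reset_type widest_pending_reset(unsigned long pending)
{
	return fls(pending) - 1;
}

/* e.g. pending = BIT(RESET_TYPE_RECOVER_OR_ALL) | BIT(RESET_TYPE_WORLD)
 *              = 0x0a, so widest_pending_reset(pending) == 3,
 * i.e. RESET_TYPE_WORLD under the new numbering.
 */
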
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 8e61cd06f66..6e768175e7e 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -154,6 +154,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
};
/* Number of ethtool statistics */
@@ -978,7 +979,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
rule->m_ext.data[1]))
return -EINVAL;
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0,
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ?
0xfff : rule->ring_cookie);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 49bcd196e10..4486102fa9b 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1546,10 +1546,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
static void falcon_init_rx_cfg(struct efx_nic *efx)
{
- /* Prior to Siena the RX DMA engine will split each frame at
- * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
- * be so large that that never happens. */
- const unsigned huge_buf_size = (3 * 4096) >> 5;
/* RX control FIFO thresholds (32 entries) */
const unsigned ctrl_xon_thr = 20;
const unsigned ctrl_xoff_thr = 25;
@@ -1557,10 +1553,15 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
efx_reado(efx, &reg, FR_AZ_RX_CFG);
if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
- /* Data FIFO size is 5.5K */
+ /* Data FIFO size is 5.5K. The RX DMA engine only
+ * supports scattering for user-mode queues, but will
+ * split DMA writes at intervals of RX_USR_BUF_SIZE
+ * (32-byte units) even for kernel-mode queues. We
+ * set it to be so large that that never happens.
+ */
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
- huge_buf_size);
+ (3 * 4096) >> 5);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
@@ -1569,7 +1570,7 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
/* Data FIFO size is 80K; register fields moved */
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
- huge_buf_size);
+ EFX_RX_USR_BUF_SIZE >> 5);
/* Send XON and XOFF at ~3 * max MTU away from empty/full */
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
@@ -1815,6 +1816,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_padding = 0x24,
+ .can_rx_scatter = false,
.max_interrupt_mode = EFX_INT_MODE_MSI,
.phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
@@ -1865,6 +1867,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
+ .can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 8af42cd1fed..2397f0e8d3e 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -66,6 +66,10 @@ struct efx_filter_state {
#endif
};
+static void efx_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_filter_table *table,
+ unsigned int filter_idx);
+
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
* key derived from the n-tuple. The initial LFSR state is 0xffff. */
static u16 efx_filter_hash(u32 key)
@@ -168,6 +172,25 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS));
+
+ /* There is a single bit to enable RX scatter for all
+ * unmatched packets. Only set it if scatter is
+ * enabled in both filter specs.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
+ table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_SCATTER));
+ } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* We don't expose 'default' filters because unmatched
+ * packets always go to the queue number found in the
+ * RSS table. But we still need to set the RX scatter
+ * bit here.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ efx->rx_scatter);
}
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
@@ -409,9 +432,18 @@ static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
struct efx_filter_spec *spec = &table->spec[filter_idx];
+ enum efx_filter_flags flags = 0;
+
+ /* If there's only one channel then disable RSS for non VF
+ * traffic, thereby allowing VFs to use RSS when the PF can't.
+ */
+ if (efx->n_rx_channels > 1)
+ flags |= EFX_FILTER_FLAG_RX_RSS;
- efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
- EFX_FILTER_FLAG_RX_RSS, 0);
+ if (efx->rx_scatter)
+ flags |= EFX_FILTER_FLAG_RX_SCATTER;
+
+ efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
spec->type = EFX_FILTER_UC_DEF + filter_idx;
table->used_bitmap[0] |= 1 << filter_idx;
}
@@ -463,13 +495,6 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
break;
}
- case EFX_FILTER_TABLE_RX_DEF:
- /* One filter spec per type */
- BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
- BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
- EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
- return spec->type - EFX_FILTER_UC_DEF;
-
case EFX_FILTER_TABLE_RX_MAC: {
bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_7(
@@ -520,42 +545,6 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
return true;
}
-static int efx_filter_search(struct efx_filter_table *table,
- struct efx_filter_spec *spec, u32 key,
- bool for_insert, unsigned int *depth_required)
-{
- unsigned hash, incr, filter_idx, depth, depth_max;
-
- hash = efx_filter_hash(key);
- incr = efx_filter_increment(key);
-
- filter_idx = hash & (table->size - 1);
- depth = 1;
- depth_max = (for_insert ?
- (spec->priority <= EFX_FILTER_PRI_HINT ?
- FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
- table->search_depth[spec->type]);
-
- for (;;) {
- /* Return success if entry is used and matches this spec
- * or entry is unused and we are trying to insert.
- */
- if (test_bit(filter_idx, table->used_bitmap) ?
- efx_filter_equal(spec, &table->spec[filter_idx]) :
- for_insert) {
- *depth_required = depth;
- return filter_idx;
- }
-
- /* Return failure if we reached the maximum search depth */
- if (depth == depth_max)
- return for_insert ? -EBUSY : -ENOENT;
-
- filter_idx = (filter_idx + incr) & (table->size - 1);
- ++depth;
- }
-}
-
/*
* Construct/deconstruct external filter IDs. At least the RX filter
* IDs must be ordered by matching priority, for RX NFC semantics.
@@ -650,44 +639,111 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
* @spec: Specification for the filter
- * @replace: Flag for whether the specified filter may replace a filter
- * with an identical match expression and equal or lower priority
+ * @replace_equal: Flag for whether the specified filter may replace an
+ * existing filter with equal priority
*
* On success, return the filter ID.
* On failure, return a negative error code.
+ *
+ * If an existing filter has equal match values to the new filter
+ * spec, then the new filter might replace it, depending on the
+ * relative priorities. If the existing filter has lower priority, or
+ * if @replace_equal is set and it has equal priority, then it is
+ * replaced. Otherwise the function fails, returning -%EPERM if
+ * the existing filter has higher priority or -%EEXIST if it has
+ * equal priority.
*/
s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
- bool replace)
+ bool replace_equal)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
- struct efx_filter_spec *saved_spec;
efx_oword_t filter;
- unsigned int filter_idx, depth = 0;
- u32 key;
+ int rep_index, ins_index;
+ unsigned int depth = 0;
int rc;
if (!table || table->size == 0)
return -EINVAL;
- key = efx_filter_build(&filter, spec);
-
netif_vdbg(efx, hw, efx->net_dev,
"%s: type %d search_depth=%d", __func__, spec->type,
table->search_depth[spec->type]);
- spin_lock_bh(&state->lock);
+ if (table->id == EFX_FILTER_TABLE_RX_DEF) {
+ /* One filter spec per type */
+ BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
+ BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
+ EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
+ rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
+ ins_index = rep_index;
- rc = efx_filter_search(table, spec, key, true, &depth);
- if (rc < 0)
- goto out;
- filter_idx = rc;
- BUG_ON(filter_idx >= table->size);
- saved_spec = &table->spec[filter_idx];
-
- if (test_bit(filter_idx, table->used_bitmap)) {
- /* Should we replace the existing filter? */
- if (!replace) {
+ spin_lock_bh(&state->lock);
+ } else {
+ /* Search concurrently for
+ * (1) a filter to be replaced (rep_index): any filter
+ * with the same match values, up to the current
+ * search depth for this type, and
+ * (2) the insertion point (ins_index): (1) or any
+ * free slot before it or up to the maximum search
+ * depth for this priority
+ * We fail if we cannot find (2).
+ *
+ * We can stop once either
+ * (a) we find (1), in which case we have definitely
+ * found (2) as well; or
+ * (b) we have searched exhaustively for (1), and have
+ * either found (2) or searched exhaustively for it
+ */
+ u32 key = efx_filter_build(&filter, spec);
+ unsigned int hash = efx_filter_hash(key);
+ unsigned int incr = efx_filter_increment(key);
+ unsigned int max_rep_depth = table->search_depth[spec->type];
+ unsigned int max_ins_depth =
+ spec->priority <= EFX_FILTER_PRI_HINT ?
+ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
+ unsigned int i = hash & (table->size - 1);
+
+ ins_index = -1;
+ depth = 1;
+
+ spin_lock_bh(&state->lock);
+
+ for (;;) {
+ if (!test_bit(i, table->used_bitmap)) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_filter_equal(spec, &table->spec[i])) {
+ /* Case (a) */
+ if (ins_index < 0)
+ ins_index = i;
+ rep_index = i;
+ break;
+ }
+
+ if (depth >= max_rep_depth &&
+ (ins_index >= 0 || depth >= max_ins_depth)) {
+ /* Case (b) */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out;
+ }
+ rep_index = -1;
+ break;
+ }
+
+ i = (i + incr) & (table->size - 1);
+ ++depth;
+ }
+ }
+
+ /* If we found a filter to be replaced, check whether we
+ * should do so
+ */
+ if (rep_index >= 0) {
+ struct efx_filter_spec *saved_spec = &table->spec[rep_index];
+
+ if (spec->priority == saved_spec->priority && !replace_equal) {
rc = -EEXIST;
goto out;
}
@@ -695,11 +751,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
rc = -EPERM;
goto out;
}
- } else {
- __set_bit(filter_idx, table->used_bitmap);
+ }
+
+ /* Insert the filter */
+ if (ins_index != rep_index) {
+ __set_bit(ins_index, table->used_bitmap);
++table->used;
}
- *saved_spec = *spec;
+ table->spec[ins_index] = *spec;
if (table->id == EFX_FILTER_TABLE_RX_DEF) {
efx_filter_push_rx_config(efx);
@@ -713,13 +772,19 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
}
efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
+ table->offset + table->step * ins_index);
+
+ /* If we were able to replace a filter by inserting
+ * at a lower depth, clear the replaced filter
+ */
+ if (ins_index != rep_index && rep_index >= 0)
+ efx_filter_table_clear_entry(efx, table, rep_index);
}
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
- __func__, spec->type, filter_idx, spec->dmaq_id);
- rc = efx_filter_make_id(spec, filter_idx);
+ __func__, spec->type, ins_index, spec->dmaq_id);
+ rc = efx_filter_make_id(spec, ins_index);
out:
spin_unlock_bh(&state->lock);
@@ -1060,6 +1125,50 @@ void efx_remove_filters(struct efx_nic *efx)
kfree(state);
}
+/* Update scatter enable flags for filters pointing to our own RX queues */
+void efx_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = EFX_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ table->spec[filter_idx].dmaq_id >=
+ efx->n_rx_channels)
+ continue;
+
+ if (efx->rx_scatter)
+ table->spec[filter_idx].flags |=
+ EFX_FILTER_FLAG_RX_SCATTER;
+ else
+ table->spec[filter_idx].flags &=
+ ~EFX_FILTER_FLAG_RX_SCATTER;
+
+ if (table_id == EFX_FILTER_TABLE_RX_DEF)
+ /* Pushed by efx_filter_push_rx_config() */
+ continue;
+
+ efx_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_filter_push_rx_config(efx);
+
+ spin_unlock_bh(&state->lock);
+}
+
#ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
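
The rewritten insertion path walks the open-addressed table once while tracking two cursors: ins_index, the first usable slot, and rep_index, an existing filter with equal match values. A simplified, self-contained version of that probe (no per-type/per-priority depth limits or locking; assumes incr is odd so a power-of-two table is fully visited):

/* Returns the insertion slot (or -1 if the table is full) and, via
 * *rep, a matching entry to replace (or -1). If a match is found
 * after a free slot, the caller inserts at the earlier free slot and
 * clears the replaced entry, exactly as the driver code above does.
 */
static int probe_insert(const unsigned used[], const unsigned keys[],
			unsigned size, unsigned hash, unsigned incr,
			unsigned key, int *rep)
{
	unsigned i = hash & (size - 1);
	unsigned depth;
	int ins = -1;

	*rep = -1;
	for (depth = 1; depth <= size; depth++) {
		if (!used[i]) {
			if (ins < 0)
				ins = i;	/* first free slot */
		} else if (keys[i] == key) {
			if (ins < 0)
				ins = i;	/* replace in place */
			*rep = i;
			break;			/* match found: stop */
		}
		i = (i + incr) & (size - 1);
	}
	return ins;
}
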
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 9d426d0457b..c5c9747861b 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -553,6 +553,7 @@
#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0a90abd2421..9bd433a095c 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -69,6 +69,12 @@
#define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
+/* Maximum possible MTU the driver supports */
+#define EFX_MAX_MTU (9 * 1024)
+
+/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */
+#define EFX_RX_USR_BUF_SIZE 1824
+
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
@@ -206,25 +212,23 @@ struct efx_tx_queue {
/**
* struct efx_rx_buffer - An Efx RX data buffer
* @dma_addr: DMA base address of the buffer
- * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
- * Will be %NULL if the buffer slot is currently free.
- * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
+ * @page: The associated page buffer.
* Will be %NULL if the buffer slot is currently free.
- * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
- * @len: Buffer length, in bytes.
- * @flags: Flags for buffer and packet state.
+ * @page_offset: If pending: offset in @page of DMA base address.
+ * If completed: offset in @page of Ethernet header.
+ * @len: If pending: length for DMA descriptor.
+ * If completed: received length, excluding hash prefix.
+ * @flags: Flags for buffer and packet state. These are only set on the
+ * first buffer of a scattered packet.
*/
struct efx_rx_buffer {
dma_addr_t dma_addr;
- union {
- struct sk_buff *skb;
- struct page *page;
- } u;
+ struct page *page;
u16 page_offset;
u16 len;
u16 flags;
};
-#define EFX_RX_BUF_PAGE 0x0001
+#define EFX_RX_BUF_LAST_IN_PAGE 0x0001
#define EFX_RX_PKT_CSUMMED 0x0002
#define EFX_RX_PKT_DISCARD 0x0004
@@ -260,14 +264,23 @@ struct efx_rx_page_state {
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
+ * @scatter_n: Number of buffers used by current packet
+ * @page_ring: The ring to store DMA mapped pages for reuse.
+ * @page_add: Counter to calculate the write pointer for the recycle ring.
+ * @page_remove: Counter to calculate the read pointer for the recycle ring.
+ * @page_recycle_count: The number of pages that have been recycled.
+ * @page_recycle_failed: The number of pages that couldn't be recycled because
+ * the kernel still held a reference to them.
+ * @page_recycle_full: The number of pages that were released because the
+ * recycle ring was full.
+ * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
* @max_fill: RX descriptor maximum fill level (<= ring size)
* @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
* (<= @max_fill)
* @min_fill: RX descriptor minimum non-zero fill level.
* This records the minimum fill level observed when a ring
* refill was triggered.
- * @alloc_page_count: RX allocation strategy counter.
- * @alloc_skb_count: RX allocation strategy counter.
+ * @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
*/
struct efx_rx_queue {
@@ -279,15 +292,22 @@ struct efx_rx_queue {
bool enabled;
bool flush_pending;
- int added_count;
- int notified_count;
- int removed_count;
+ unsigned int added_count;
+ unsigned int notified_count;
+ unsigned int removed_count;
+ unsigned int scatter_n;
+ struct page **page_ring;
+ unsigned int page_add;
+ unsigned int page_remove;
+ unsigned int page_recycle_count;
+ unsigned int page_recycle_failed;
+ unsigned int page_recycle_full;
+ unsigned int page_ptr_mask;
unsigned int max_fill;
unsigned int fast_fill_trigger;
unsigned int min_fill;
unsigned int min_overfill;
- unsigned int alloc_page_count;
- unsigned int alloc_skb_count;
+ unsigned int recycle_count;
struct timer_list slow_fill;
unsigned int slow_fill_count;
};
@@ -336,10 +356,6 @@ enum efx_rx_alloc_method {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
- * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
- * and diagnostic counters
- * @rx_alloc_push_pages: RX allocation method currently in use for pushing
- * descriptors
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -347,6 +363,12 @@ enum efx_rx_alloc_method {
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
+ * lack of descriptors
+ * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
+ * __efx_rx_packet(), or zero if there is none
+ * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
+ * by __efx_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
*/
@@ -371,9 +393,6 @@ struct efx_channel {
unsigned int rfs_filters_added;
#endif
- int rx_alloc_level;
- int rx_alloc_push_pages;
-
unsigned n_rx_tobe_disc;
unsigned n_rx_ip_hdr_chksum_err;
unsigned n_rx_tcp_udp_chksum_err;
@@ -381,11 +400,10 @@ struct efx_channel {
unsigned n_rx_frm_trunc;
unsigned n_rx_overlength;
unsigned n_skbuff_leaks;
+ unsigned int n_rx_nodesc_trunc;
- /* Used to pipeline received packets in order to optimise memory
- * access with prefetches.
- */
- struct efx_rx_buffer *rx_pkt;
+ unsigned int rx_pkt_n_frags;
+ unsigned int rx_pkt_index;
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
@@ -410,7 +428,7 @@ struct efx_channel_type {
void (*post_remove)(struct efx_channel *);
void (*get_name)(struct efx_channel *, char *buf, size_t len);
struct efx_channel *(*copy)(const struct efx_channel *);
- void (*receive_skb)(struct efx_channel *, struct sk_buff *);
+ bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
bool keep_eventq;
};
@@ -446,6 +464,7 @@ enum nic_state {
STATE_UNINIT = 0, /* device being probed/removed or is frozen */
STATE_READY = 1, /* hardware ready and netdev registered */
STATE_DISABLED = 2, /* device disabled due to hardware errors */
+ STATE_RECOVERY = 3, /* device recovering from PCI error */
};
/*
@@ -684,10 +703,13 @@ struct vfdi_status;
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
- * @rx_buffer_len: RX buffer length
+ * @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
+ * for use in sk_buff::truesize
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
+ * @rx_scatter: Scatter mode enabled for receives
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
* @irq_status: Interrupt status buffer
@@ -800,10 +822,15 @@ struct efx_nic {
unsigned rss_spread;
unsigned tx_channel_offset;
unsigned n_tx_channels;
- unsigned int rx_buffer_len;
+ unsigned int rx_dma_len;
unsigned int rx_buffer_order;
+ unsigned int rx_buffer_truesize;
+ unsigned int rx_page_buf_step;
+ unsigned int rx_bufs_per_page;
+ unsigned int rx_pages_per_batch;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
+ bool rx_scatter;
unsigned int_error_count;
unsigned long int_error_expire;
@@ -934,8 +961,9 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_hash_size: Size of hash at start of RX buffer
- * @rx_buffer_padding: Size of padding at end of RX buffer
+ * @rx_buffer_hash_size: Size of hash at start of RX packet
+ * @rx_buffer_padding: Size of padding at end of RX packet
+ * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
* @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode.
* @phys_addr_channels: Number of channels with physically addressed
@@ -983,6 +1011,7 @@ struct efx_nic_type {
u64 max_dma_mask;
unsigned int rx_buffer_hash_size;
unsigned int rx_buffer_padding;
+ bool can_rx_scatter;
unsigned int max_interrupt_mode;
unsigned int phys_addr_channels;
unsigned int timer_period_max;
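
EFX_RX_USR_BUF_SIZE is chosen so that each half of a 4 KiB page can hold the page-state header, the IP alignment pad and the 1824-byte DMA area; this mirrors the BUILD_BUG_ON in efx_start_datapath() above. A worked check in plain C11, with the overhead sizes as assumptions for illustration:

#define DEMO_PAGE_SIZE		4096u
#define DEMO_USR_BUF_SIZE	1824u	/* EFX_RX_USR_BUF_SIZE */
#define DEMO_PAGE_STATE		16u	/* assumed sizeof(struct efx_rx_page_state) */
#define DEMO_IP_ALIGN		2u	/* assumed EFX_PAGE_IP_ALIGN */

/* Per-buffer budget: 16 + 2 + 1824 = 1842 <= 2048 = PAGE_SIZE / 2,
 * so two scatter buffers fit per page with room to spare.
 */
_Static_assert(DEMO_PAGE_STATE + DEMO_IP_ALIGN + DEMO_USR_BUF_SIZE
	       <= DEMO_PAGE_SIZE / 2, "two RX buffers must fit a page");
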
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index eaa8e874a3c..b0503cd8c2a 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -305,11 +305,11 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len)
{
buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, GFP_ATOMIC);
+ &buffer->dma_addr,
+ GFP_ATOMIC | __GFP_ZERO);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
- memset(buffer->addr, 0, len);
return 0;
}
@@ -592,12 +592,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
bool iscsi_digest_en = is_b0;
+ bool jumbo_en;
+
+ /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+ * DMA to continue after a PCIe page boundary (and scattering
+ * is not possible). In Falcon B0 and Siena, it enables
+ * scatter.
+ */
+ jumbo_en = !is_b0 || efx->rx_scatter;
netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n",
efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+ rx_queue->scatter_n = 0;
+
/* Pin RX descriptor ring */
efx_init_special_buffer(efx, &rx_queue->rxd);
@@ -614,8 +624,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
FRF_AZ_RX_DESCQ_SIZE,
__ffs(rx_queue->rxd.entries),
FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
- /* For >=B0 this is scatter so disable */
- FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue));
@@ -969,13 +978,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
EFX_RX_PKT_DISCARD : 0;
}
-/* Handle receive events that are not in-order. */
-static void
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
unsigned expected, dropped;
+ if (rx_queue->scatter_n &&
+ index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+ rx_queue->ptr_mask)) {
+ ++channel->n_rx_nodesc_trunc;
+ return true;
+ }
+
expected = rx_queue->removed_count & rx_queue->ptr_mask;
dropped = (index - expected) & rx_queue->ptr_mask;
netif_info(efx, rx_err, efx->net_dev,
@@ -984,6 +1004,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return false;
}
/* Handle a packet received event
@@ -999,7 +1020,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned expected_ptr;
- bool rx_ev_pkt_ok;
+ bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
u16 flags;
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
@@ -1007,21 +1028,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
if (unlikely(ACCESS_ONCE(efx->reset_pending)))
return;
- /* Basic packet information */
- rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
- rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
- WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
+ rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+ rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
channel->channel);
rx_queue = efx_channel_get_rx_queue(channel);
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
- expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
- if (unlikely(rx_ev_desc_ptr != expected_ptr))
- efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+ expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+ rx_queue->ptr_mask);
+
+ /* Check for partial drops and other errors */
+ if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+ unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+ if (rx_ev_desc_ptr != expected_ptr &&
+ !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+ return;
+
+ /* Discard all pending fragments */
+ if (rx_queue->scatter_n) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ }
+
+ /* Return if there is no new fragment */
+ if (rx_ev_desc_ptr != expected_ptr)
+ return;
+
+ /* Discard new fragment if not SOP */
+ if (!rx_ev_sop) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ 1, 0, EFX_RX_PKT_DISCARD);
+ ++rx_queue->removed_count;
+ return;
+ }
+ }
+
+ ++rx_queue->scatter_n;
+ if (rx_ev_cont)
+ return;
+
+ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
if (likely(rx_ev_pkt_ok)) {
/* If packet is marked as OK and packet type is TCP/IP or
@@ -1049,7 +1105,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
channel->irq_mod_score += 2;
/* Handle received packet */
- efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
}
/* If this flush done event corresponds to a &struct efx_tx_queue, then
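
The rewritten RX event handler is a small state machine over scatter_n: a SOP event starts a packet, JUMBO_CONT extends it, and the completion event flushes every accumulated fragment to efx_rx_packet() at once. A condensed model of the accumulation (demo_* names are hypothetical; the error and partial-discard paths shown above are omitted):

/* sop/cont map to the event's FSF_AZ_RX_EV_SOP and
 * FSF_AZ_RX_EV_JUMBO_CONT fields.
 */
struct demo_rxq {
	unsigned int scatter_n, removed_count, ptr_mask;
};

static void demo_rx_packet(struct demo_rxq *q, unsigned int index,
			   unsigned int n_frags, unsigned int len,
			   u16 flags);

static void demo_rx_event(struct demo_rxq *q, bool sop, bool cont,
			  unsigned int byte_cnt, u16 flags)
{
	if (sop)
		q->scatter_n = 0;	/* a new packet starts here */

	++q->scatter_n;			/* count this fragment */
	if (cont)
		return;			/* more fragments follow */

	/* Completion: deliver all accumulated fragments as one packet */
	demo_rx_packet(q, q->removed_count & q->ptr_mask,
		       q->scatter_n, byte_cnt, flags);
	q->removed_count += q->scatter_n;
	q->scatter_n = 0;
}
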
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3f93624fc27..07f6baa15c0 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -99,6 +99,9 @@
#define PTP_V2_VERSION_LENGTH 1
#define PTP_V2_VERSION_OFFSET 29
+#define PTP_V2_UUID_LENGTH 8
+#define PTP_V2_UUID_OFFSET 48
+
/* Although PTP V2 UUIDs are comprised a ClockIdentity (8) and PortNumber (2),
* the MC only captures the last six bytes of the clock identity. These values
* reflect those, not the ones used in the standard. The standard permits
@@ -429,13 +432,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
unsigned number_readings = (response_length /
MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
unsigned i;
- unsigned min;
- unsigned min_set = 0;
unsigned total;
unsigned ngood = 0;
unsigned last_good = 0;
struct efx_ptp_data *ptp = efx->ptp_data;
- bool min_valid = false;
u32 last_sec;
u32 start_sec;
struct timespec delta;
@@ -443,35 +443,17 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
if (number_readings == 0)
return -EAGAIN;
- /* Find minimum value in this set of results, discarding clearly
- * erroneous results.
+ /* Read the set of results and increment stats for any that appear
+ * to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
- if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
- if (min_valid) {
- if (ptp->timeset[i].window < min_set)
- min_set = ptp->timeset[i].window;
- } else {
- min_valid = true;
- min_set = ptp->timeset[i].window;
- }
- }
- }
-
- if (min_valid) {
- if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
- min = ptp->base_sync_ns;
- else
- min = min_set;
- } else {
- min = SYNCHRONISATION_GRANULARITY_NS;
}
- /* Discard excessively long synchronise durations. The MC times
- * when it finishes reading the host time so the corrected window
- * time should be fairly constant for a given platform.
+ /* Find the last good host-MC synchronization result. The MC times
+ * when it finishes reading the host time so the corrected window time
+ * should be fairly constant for a given platform.
*/
total = 0;
for (i = 0; i < number_readings; i++)
@@ -489,8 +471,8 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
if (ngood == 0) {
netif_warn(efx, drv, efx->net_dev,
- "PTP no suitable synchronisations %dns %dns\n",
- ptp->base_sync_ns, min_set);
+ "PTP no suitable synchronisations %dns\n",
+ ptp->base_sync_ns);
return -EAGAIN;
}
@@ -1006,43 +988,53 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
* the receive timestamp from the MC - this will probably occur after the
* packet arrival because of the processing in the MC.
*/
-static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
+static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
{
struct efx_nic *efx = channel->efx;
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
- u8 *data;
+ u8 *match_data_012, *match_data_345;
unsigned int version;
match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
/* Correct version? */
if (ptp->mode == MC_CMD_PTP_MODE_V1) {
- if (skb->len < PTP_V1_MIN_LENGTH) {
- netif_receive_skb(skb);
- return;
+ if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
+ return false;
}
version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
if (version != PTP_VERSION_V1) {
- netif_receive_skb(skb);
- return;
+ return false;
}
+
+ /* PTP V1 uses all six bytes of the UUID to match the packet
+ * to the timestamp
+ */
+ match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
+ match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
} else {
- if (skb->len < PTP_V2_MIN_LENGTH) {
- netif_receive_skb(skb);
- return;
+ if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
+ return false;
}
version = skb->data[PTP_V2_VERSION_OFFSET];
-
- BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
- BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
- BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
- BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
- BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
-
if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
- netif_receive_skb(skb);
- return;
+ return false;
+ }
+
+ /* The original V2 implementation uses bytes 2-7 of
+ * the UUID to match the packet to the timestamp. This
+ * discards two of the bytes of the MAC address used
+ * to create the UUID (SF bug 33070). The PTP V2
+ * enhanced mode fixes this issue and uses bytes 0-2
+ * and bytes 5-7 of the UUID.
+ */
+ match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
+ if (ptp->mode == MC_CMD_PTP_MODE_V2) {
+ match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
+ } else {
+ match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
+ BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
}
}
@@ -1056,14 +1048,19 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
timestamps = skb_hwtstamps(skb);
memset(timestamps, 0, sizeof(*timestamps));
+ /* We expect the sequence number to be in the same position in
+ * the packet for PTP V1 and V2
+ */
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
+
/* Extract UUID/Sequence information */
- data = skb->data + PTP_V1_UUID_OFFSET;
- match->words[0] = (data[0] |
- (data[1] << 8) |
- (data[2] << 16) |
- (data[3] << 24));
- match->words[1] = (data[4] |
- (data[5] << 8) |
+ match->words[0] = (match_data_012[0] |
+ (match_data_012[1] << 8) |
+ (match_data_012[2] << 16) |
+ (match_data_345[0] << 24));
+ match->words[1] = (match_data_345[1] |
+ (match_data_345[2] << 8) |
(skb->data[PTP_V1_SEQUENCE_OFFSET +
PTP_V1_SEQUENCE_LENGTH - 1] <<
16));
@@ -1073,6 +1070,8 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
skb_queue_tail(&ptp->rxq, skb);
queue_work(ptp->workwq, &ptp->work);
+
+ return true;
}
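
For reference, the match-word packing at the end of efx_ptp_rx() reduces to a small pure function; this userspace sketch uses parameterised byte runs instead of the PTP_V*_UUID_OFFSET constants:

#include <stdint.h>

/* Pack the six UUID match bytes plus the low sequence byte into two
 * 32-bit words, mirroring the match->words[] layout built above.
 * data_012 and data_345 point at the two three-byte UUID runs picked
 * according to the PTP mode; seq_lsb is the last sequence-number byte.
 */
static void ptp_pack_match_words(const uint8_t *data_012,
				 const uint8_t *data_345,
				 uint8_t seq_lsb, uint32_t words[2])
{
	words[0] = (uint32_t)data_012[0] |
		   ((uint32_t)data_012[1] << 8) |
		   ((uint32_t)data_012[2] << 16) |
		   ((uint32_t)data_345[0] << 24);
	words[1] = (uint32_t)data_345[1] |
		   ((uint32_t)data_345[2] << 8) |
		   ((uint32_t)seq_lsb << 16);
}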
/* Transmit a PTP packet. This has to be transmitted by the MC
@@ -1167,7 +1166,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
* timestamped
*/
init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V2;
+ new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
enable_wanted = true;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -1186,7 +1185,14 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
if (init->tx_type != HWTSTAMP_TX_OFF)
enable_wanted = true;
+ /* Old versions of the firmware do not support the improved
+ * UUID filtering option (SF bug 33070). If the firmware does
+ * not accept the enhanced mode, fall back to the standard PTP
+ * v2 UUID filtering.
+ */
rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
+ if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
+ rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index bb579a6128c..e73e30bac10 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -16,6 +16,7 @@
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
+#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
@@ -24,85 +25,39 @@
#include "selftest.h"
#include "workarounds.h"
-/* Number of RX descriptors pushed at once. */
-#define EFX_RX_BATCH 8
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
-/* Maximum size of a buffer sharing a page */
-#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
+#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u
-/*
- * rx_alloc_method - RX buffer allocation method
- *
- * This driver supports two methods for allocating and using RX buffers:
- * each RX buffer may be backed by an skb or by an order-n page.
- *
- * When GRO is in use then the second method has a lower overhead,
- * since we don't have to allocate then free skbs on reassembled frames.
- *
- * Values:
- * - RX_ALLOC_METHOD_AUTO = 0
- * - RX_ALLOC_METHOD_SKB = 1
- * - RX_ALLOC_METHOD_PAGE = 2
- *
- * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
- * controlled by the parameters below.
- *
- * - Since pushing and popping descriptors are separated by the rx_queue
- * size, so the watermarks should be ~rxd_size.
- * - The performance win by using page-based allocation for GRO is less
- * than the performance hit of using page-based allocation of non-GRO,
- * so the watermarks should reflect this.
- *
- * Per channel we maintain a single variable, updated by each channel:
- *
- * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
- * RX_ALLOC_FACTOR_SKB)
- * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
- * limits the hysteresis), and update the allocation strategy:
- *
- * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
- * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
- */
-static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
-
-#define RX_ALLOC_LEVEL_GRO 0x2000
-#define RX_ALLOC_LEVEL_MAX 0x3000
-#define RX_ALLOC_FACTOR_GRO 1
-#define RX_ALLOC_FACTOR_SKB (-2)
-
/* This is the percentage fill level below which new RX descriptors
* will be added to the RX descriptor ring.
*/
static unsigned int rx_refill_threshold;
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+ EFX_RX_USR_BUF_SIZE)
+
/*
* RX maximum head room required.
*
- * This must be at least 1 to prevent overflow and at least 2 to allow
- * pipelined receives.
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
*/
-#define EFX_RXD_HEAD_ROOM 2
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
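
As a worked example of the head-room arithmetic, assuming (illustratively; the real values come from EFX_MAX_FRAME_LEN(EFX_MAX_MTU) and EFX_RX_USR_BUF_SIZE elsewhere in the driver) a 9234-byte maximum frame and 1824-byte user buffers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative values only, not the driver's constants */
	unsigned int max_frame_len = 9234;
	unsigned int usr_buf_size = 1824;
	unsigned int max_frags = DIV_ROUND_UP(max_frame_len, usr_buf_size);

	/* One slot to prevent ring overflow, plus one whole packet's
	 * worth of buffers for pipelining: prints "max frags 6, head
	 * room 7" for these inputs.
	 */
	printf("max frags %u, head room %u\n", max_frags, 1 + max_frags);
	return 0;
}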
-/* Offset of ethernet header within page */
-static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
- struct efx_rx_buffer *buf)
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
- return buf->page_offset + efx->type->rx_buffer_hash_size;
-}
-static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
-{
- return PAGE_SIZE << efx->rx_buffer_order;
-}
-
-static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
-{
- if (buf->flags & EFX_RX_BUF_PAGE)
- return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
- else
- return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
+ return page_address(buf->page) + buf->page_offset;
}
static inline u32 efx_rx_buf_hash(const u8 *eh)
@@ -119,66 +74,81 @@ static inline u32 efx_rx_buf_hash(const u8 *eh)
#endif
}
-/**
- * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
- *
- * @rx_queue: Efx RX queue
- *
- * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
- * struct efx_rx_buffer for each one. Return a negative error code or 0
- * on success. May fail having only inserted fewer than EFX_RX_BATCH
- * buffers.
- */
-static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+ if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+ return efx_rx_buffer(rx_queue, 0);
+ else
+ return rx_buf + 1;
+}
+
+static inline void efx_sync_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
+ DMA_FROM_DEVICE);
+}
+
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
+ L1_CACHE_BYTES);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+ efx->rx_page_buf_step);
+ efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+ efx->rx_bufs_per_page;
+ efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+ efx->rx_bufs_per_page);
+}
+
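
The page-split sums in efx_rx_config_page_split() can be checked in isolation; the sketch below models the order-0 case with assumed values (4KiB pages, 64-byte cache lines, a 32-byte page-state header), where the driver would read them from struct efx_nic:

#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed inputs for illustration */
	unsigned int page_size = 4096, cache_line = 64;
	unsigned int ip_align = 2, state_size = 32, rx_dma_len = 1536;
	unsigned int preferred_batch = 8;

	unsigned int buf_step = ALIGN_UP(rx_dma_len + ip_align, cache_line);
	unsigned int bufs_per_page = (page_size - state_size) / buf_step;
	unsigned int truesize = page_size / bufs_per_page;
	unsigned int pages_per_batch =
		DIV_ROUND_UP(preferred_batch, bufs_per_page);

	/* Prints: step 1600, bufs/page 2, truesize 2048, pages/batch 4 */
	printf("step %u, bufs/page %u, truesize %u, pages/batch %u\n",
	       buf_step, bufs_per_page, truesize, pages_per_batch);
	return 0;
}

For higher-order pages the driver pins rx_bufs_per_page to 1, so the split above only applies to the order-0 case.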
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- struct net_device *net_dev = efx->net_dev;
- struct efx_rx_buffer *rx_buf;
- struct sk_buff *skb;
- int skb_len = efx->rx_buffer_len;
- unsigned index, count;
+ struct page *page;
+ struct efx_rx_page_state *state;
+ unsigned index;
- for (count = 0; count < EFX_RX_BATCH; ++count) {
- index = rx_queue->added_count & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
-
- rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
- if (unlikely(!skb))
- return -ENOMEM;
-
- /* Adjust the SKB for padding */
- skb_reserve(skb, NET_IP_ALIGN);
- rx_buf->len = skb_len - NET_IP_ALIGN;
- rx_buf->flags = 0;
-
- rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
- skb->data, rx_buf->len,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
- rx_buf->dma_addr))) {
- dev_kfree_skb_any(skb);
- rx_buf->u.skb = NULL;
- return -EIO;
- }
+ index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+ page = rx_queue->page_ring[index];
+ if (page == NULL)
+ return NULL;
+
+ rx_queue->page_ring[index] = NULL;
+ /* page_remove cannot exceed page_add. */
+ if (rx_queue->page_remove != rx_queue->page_add)
+ ++rx_queue->page_remove;
- ++rx_queue->added_count;
- ++rx_queue->alloc_skb_count;
+ /* If page_count is 1 then we hold the only reference to this page. */
+ if (page_count(page) == 1) {
+ ++rx_queue->page_recycle_count;
+ return page;
+ } else {
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ ++rx_queue->page_recycle_failed;
}
- return 0;
+ return NULL;
}
/**
- * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
+ * efx_init_rx_buffers - create a batch of page-based RX buffers
*
* @rx_queue: Efx RX queue
*
- * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
- * and populates struct efx_rx_buffers for each one. Return a negative error
- * code or 0 on success. If a single page can be split between two buffers,
- * then the page will either be inserted fully, or not at at all.
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * struct efx_rx_buffers for each one. Return a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
@@ -188,150 +158,140 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
dma_addr_t dma_addr;
unsigned index, count;
- /* We can split a page between two buffers */
- BUILD_BUG_ON(EFX_RX_BATCH & 1);
-
- for (count = 0; count < EFX_RX_BATCH; ++count) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
- efx->rx_buffer_order);
- if (unlikely(page == NULL))
- return -ENOMEM;
- dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
- efx_rx_buf_size(efx),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
- __free_pages(page, efx->rx_buffer_order);
- return -EIO;
+ count = 0;
+ do {
+ page = efx_reuse_page(rx_queue);
+ if (page == NULL) {
+ page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr =
+ dma_map_page(&efx->pci_dev->dev, page, 0,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ state = page_address(page);
+ state->dma_addr = dma_addr;
+ } else {
+ state = page_address(page);
+ dma_addr = state->dma_addr;
}
- state = page_address(page);
- state->refcnt = 0;
- state->dma_addr = dma_addr;
dma_addr += sizeof(struct efx_rx_page_state);
page_offset = sizeof(struct efx_rx_page_state);
- split:
- index = rx_queue->added_count & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
- rx_buf->u.page = page;
- rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
- rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
- rx_buf->flags = EFX_RX_BUF_PAGE;
- ++rx_queue->added_count;
- ++rx_queue->alloc_page_count;
- ++state->refcnt;
-
- if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
- /* Use the second half of the page */
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
get_page(page);
- dma_addr += (PAGE_SIZE >> 1);
- page_offset += (PAGE_SIZE >> 1);
- ++count;
- goto split;
- }
- }
+ dma_addr += efx->rx_page_buf_step;
+ page_offset += efx->rx_page_buf_step;
+ } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+ rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+ } while (++count < efx->rx_pages_per_batch);
return 0;
}
+/* Unmap a DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf,
- unsigned int used_len)
+ struct efx_rx_buffer *rx_buf)
{
- if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
- struct efx_rx_page_state *state;
-
- state = page_address(rx_buf->u.page);
- if (--state->refcnt == 0) {
- dma_unmap_page(&efx->pci_dev->dev,
- state->dma_addr,
- efx_rx_buf_size(efx),
- DMA_FROM_DEVICE);
- } else if (used_len) {
- dma_sync_single_for_cpu(&efx->pci_dev->dev,
- rx_buf->dma_addr, used_len,
- DMA_FROM_DEVICE);
- }
- } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
- dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
- rx_buf->len, DMA_FROM_DEVICE);
+ struct page *page = rx_buf->page;
+
+ if (page) {
+ struct efx_rx_page_state *state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev,
+ state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
}
}
-static void efx_free_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
- if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
- __free_pages(rx_buf->u.page, efx->rx_buffer_order);
- rx_buf->u.page = NULL;
- } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
- dev_kfree_skb_any(rx_buf->u.skb);
- rx_buf->u.skb = NULL;
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
}
}
-static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
{
- efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
- efx_free_rx_buffer(rx_queue->efx, rx_buf);
-}
+ struct page *page = rx_buf->page;
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned index;
-/* Attempt to resurrect the other receive buffer that used to share this page,
- * which had previously been passed up to the kernel and freed. */
-static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
-{
- struct efx_rx_page_state *state = page_address(rx_buf->u.page);
- struct efx_rx_buffer *new_buf;
- unsigned fill_level, index;
-
- /* +1 because efx_rx_packet() incremented removed_count. +1 because
- * we'd like to insert an additional descriptor whilst leaving
- * EFX_RXD_HEAD_ROOM for the non-recycle path */
- fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
- if (unlikely(fill_level > rx_queue->max_fill)) {
- /* We could place "state" on a list, and drain the list in
- * efx_fast_push_rx_descriptors(). For now, this will do. */
+ /* Only recycle the page after processing the final buffer. */
+ if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
return;
- }
- ++state->refcnt;
- get_page(rx_buf->u.page);
+ index = rx_queue->page_add & rx_queue->page_ptr_mask;
+ if (rx_queue->page_ring[index] == NULL) {
+ unsigned read_index = rx_queue->page_remove &
+ rx_queue->page_ptr_mask;
- index = rx_queue->added_count & rx_queue->ptr_mask;
- new_buf = efx_rx_buffer(rx_queue, index);
- new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
- new_buf->u.page = rx_buf->u.page;
- new_buf->len = rx_buf->len;
- new_buf->flags = EFX_RX_BUF_PAGE;
- ++rx_queue->added_count;
+ /* The next slot in the recycle ring is available, but
+ * increment page_remove if the read pointer currently
+ * points here.
+ */
+ if (read_index == index)
+ ++rx_queue->page_remove;
+ rx_queue->page_ring[index] = page;
+ ++rx_queue->page_add;
+ return;
+ }
+ ++rx_queue->page_recycle_full;
+ efx_unmap_rx_buffer(efx, rx_buf);
+ put_page(rx_buf->page);
}
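
Taken together, efx_reuse_page() and efx_recycle_rx_page() above amount to a small single-producer ring of pages indexed by free-running counters; a condensed userspace model, leaving out the DMA mapping and page refcounting:

#include <stddef.h>

struct page_ring_model {
	void **slots;		/* page_ring */
	unsigned int mask;	/* page_ptr_mask: size - 1 */
	unsigned int add;	/* free-running write counter */
	unsigned int remove;	/* free-running read counter */
};

/* Producer: store a page in the next slot if it is empty; otherwise
 * report the ring full (the driver then unmaps and frees the page).
 */
static int ring_push(struct page_ring_model *r, void *page)
{
	unsigned int index = r->add & r->mask;

	if (r->slots[index])
		return -1;	/* ring full */
	/* If the reader currently points at this slot, advance it so
	 * that add stays ahead of remove.
	 */
	if ((r->remove & r->mask) == index)
		r->remove++;
	r->slots[index] = page;
	r->add++;
	return 0;
}

/* Consumer: take the oldest page, if any. */
static void *ring_pop(struct page_ring_model *r)
{
	unsigned int index = r->remove & r->mask;
	void *page = r->slots[index];

	if (!page)
		return NULL;
	r->slots[index] = NULL;
	if (r->remove != r->add)
		r->remove++;
	return page;
}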
-/* Recycle the given rx buffer directly back into the rx_queue. There is
- * always room to add this buffer, because we've just popped a buffer. */
-static void efx_recycle_rx_buffer(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
{
- struct efx_nic *efx = channel->efx;
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- struct efx_rx_buffer *new_buf;
- unsigned index;
-
- rx_buf->flags &= EFX_RX_BUF_PAGE;
-
- if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
- efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
- page_count(rx_buf->u.page) == 1)
- efx_resurrect_rx_buffer(rx_queue, rx_buf);
+ /* Release the page reference we hold for the buffer. */
+ if (rx_buf->page)
+ put_page(rx_buf->page);
+
+ /* If this is the last buffer in a page, unmap and free it. */
+ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_free_rx_buffer(rx_buf);
+ }
+ rx_buf->page = NULL;
+}
- index = rx_queue->added_count & rx_queue->ptr_mask;
- new_buf = efx_rx_buffer(rx_queue, index);
+/* Recycle the pages that are used by buffers that have just been received. */
+static void efx_recycle_rx_buffers(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- memcpy(new_buf, rx_buf, sizeof(*new_buf));
- rx_buf->u.page = NULL;
- ++rx_queue->added_count;
+ do {
+ efx_recycle_rx_page(channel, rx_buf);
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--n_frags);
}
/**
@@ -348,8 +308,8 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
*/
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- unsigned fill_level;
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int fill_level, batch_size;
int space, rc = 0;
/* Calculate current fill level, and exit if we don't need to fill */
@@ -364,28 +324,26 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
rx_queue->min_fill = fill_level;
}
+ batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
space = rx_queue->max_fill - fill_level;
- EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);
+ EFX_BUG_ON_PARANOID(space < batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filling descriptor ring from"
- " level %d to level %d using %s allocation\n",
+ " level %d to level %d\n",
efx_rx_queue_index(rx_queue), fill_level,
- rx_queue->max_fill,
- channel->rx_alloc_push_pages ? "page" : "skb");
+ rx_queue->max_fill);
+
do {
- if (channel->rx_alloc_push_pages)
- rc = efx_init_rx_buffers_page(rx_queue);
- else
- rc = efx_init_rx_buffers_skb(rx_queue);
+ rc = efx_init_rx_buffers(rx_queue);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
efx_schedule_slow_fill(rx_queue);
goto out;
}
- } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
+ } while ((space -= batch_size) >= batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filled descriptor ring "
@@ -408,7 +366,7 @@ void efx_rx_slow_fill(unsigned long context)
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
- int len, bool *leak_packet)
+ int len)
{
struct efx_nic *efx = rx_queue->efx;
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -428,11 +386,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
"RX event (0x%x > 0x%x+0x%x). Leaking\n",
efx_rx_queue_index(rx_queue), len, max_len,
efx->type->rx_buffer_padding);
- /* If this buffer was skb-allocated, then the meta
- * data at the end of the skb will be trashed. So
- * we have no choice but to leak the fragment.
- */
- *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
@@ -448,212 +401,238 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
/* Pass a received packet up through GRO. GRO can handle pages
* regardless of checksum state and skbs with a good checksum.
*/
-static void efx_rx_packet_gro(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- const u8 *eh)
+static void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
- if (rx_buf->flags & EFX_RX_BUF_PAGE) {
- struct efx_nic *efx = channel->efx;
- struct page *page = rx_buf->u.page;
- struct sk_buff *skb;
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ while (n_frags--) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+ return;
+ }
- rx_buf->u.page = NULL;
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb->rxhash = efx_rx_buf_hash(eh);
+ skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
- skb = napi_get_frags(napi);
- if (!skb) {
- put_page(page);
- return;
- }
+ skb->data_len = skb->len;
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ gro_result = napi_gro_frags(napi);
+ if (gro_result != GRO_DROP)
+ channel->irq_mod_score += 2;
+}
- if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(eh);
+/* Allocate and construct an SKB around page fragments */
+static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags,
+ u8 *eh, int hdr_len)
+{
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
- skb_fill_page_desc(skb, 0, page,
- efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
+ /* Allocate an SKB to store the headers */
+ skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+ if (unlikely(skb == NULL))
+ return NULL;
- skb->len = rx_buf->len;
- skb->data_len = rx_buf->len;
- skb->truesize += rx_buf->len;
- skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
- CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+ EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
+ skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
+ memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
- gro_result = napi_gro_frags(napi);
- } else {
- struct sk_buff *skb = rx_buf->u.skb;
+ /* Append the remaining page(s) onto the frag list */
+ if (rx_buf->len > hdr_len) {
+ rx_buf->page_offset += hdr_len;
+ rx_buf->len -= hdr_len;
- EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
- rx_buf->u.skb = NULL;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ skb->data_len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
- gro_result = napi_gro_receive(napi, skb);
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+ } else {
+ __free_pages(rx_buf->page, efx->rx_buffer_order);
+ rx_buf->page = NULL;
+ n_frags = 0;
}
- if (gro_result == GRO_NORMAL) {
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
- } else if (gro_result != GRO_DROP) {
- channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
- channel->irq_mod_score += 2;
- }
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ /* Move past the ethernet header */
+ skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+ return skb;
}
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int len, u16 flags)
+ unsigned int n_frags, unsigned int len, u16 flags)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_rx_buffer *rx_buf;
- bool leak_packet = false;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->flags |= flags;
- /* This allows the refill path to post another buffer.
- * EFX_RXD_HEAD_ROOM ensures that the slot we are using
- * isn't overwritten yet.
- */
- rx_queue->removed_count++;
-
- /* Validate the length encoded in the event vs the descriptor pushed */
- efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
+ /* Validate the number of fragments and completed length */
+ if (n_frags == 1) {
+ efx_rx_packet__check_len(rx_queue, rx_buf, len);
+ } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
+ unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
+ unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
+ unlikely(!efx->rx_scatter)) {
+ /* If this isn't an explicit discard request, either
+ * the hardware or the driver is broken.
+ */
+ WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
+ rx_buf->flags |= EFX_RX_PKT_DISCARD;
+ }
netif_vdbg(efx, rx_status, efx->net_dev,
- "RX queue %d received id %x at %llx+%x %s%s\n",
+ "RX queue %d received ids %x-%x len %d %s%s\n",
efx_rx_queue_index(rx_queue), index,
- (unsigned long long)rx_buf->dma_addr, len,
+ (index + n_frags - 1) & rx_queue->ptr_mask, len,
(rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
(rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
- /* Discard packet, if instructed to do so */
+ /* Discard packet, if instructed to do so. Process the
+ * previous receive first.
+ */
if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
- if (unlikely(leak_packet))
- channel->n_skbuff_leaks++;
- else
- efx_recycle_rx_buffer(channel, rx_buf);
-
- /* Don't hold off the previous receive */
- rx_buf = NULL;
- goto out;
+ efx_rx_flush_packet(channel);
+ put_page(rx_buf->page);
+ efx_recycle_rx_buffers(channel, rx_buf, n_frags);
+ return;
}
- /* Release and/or sync DMA mapping - assumes all RX buffers
- * consumed in-order per RX queue
+ if (n_frags == 1)
+ rx_buf->len = len;
+
+ /* Release and/or sync the DMA mapping - assumes all RX buffers
+ * consumed in-order per RX queue.
*/
- efx_unmap_rx_buffer(efx, rx_buf, len);
+ efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
*/
- prefetch(efx_rx_buf_eh(efx, rx_buf));
+ prefetch(efx_rx_buf_va(rx_buf));
+
+ rx_buf->page_offset += efx->type->rx_buffer_hash_size;
+ rx_buf->len -= efx->type->rx_buffer_hash_size;
+
+ if (n_frags > 1) {
+ /* Release/sync DMA mapping for additional fragments.
+ * Fix length for last fragment.
+ */
+ unsigned int tail_frags = n_frags - 1;
+
+ for (;;) {
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ if (--tail_frags == 0)
+ break;
+ efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
+ }
+ rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
+ efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+ }
+
+ /* All fragments have been DMA-synced, so recycle buffers and pages. */
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_recycle_rx_buffers(channel, rx_buf, n_frags);
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
- rx_buf->len = len - efx->type->rx_buffer_hash_size;
-out:
- if (channel->rx_pkt)
- __efx_rx_packet(channel, channel->rx_pkt);
- channel->rx_pkt = rx_buf;
+ efx_rx_flush_packet(channel);
+ channel->rx_pkt_n_frags = n_frags;
+ channel->rx_pkt_index = index;
}
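
The multi-fragment validation near the top of efx_rx_packet() encodes one invariant: a completion of n_frags buffers must carry more than (n_frags - 1) and at most n_frags buffers' worth of payload. A standalone restatement, with usr_buf_size and max_frags standing in for the driver's EFX_RX_USR_BUF_SIZE and EFX_RX_MAX_FRAGS:

#include <stdbool.h>

static bool rx_scatter_len_ok(unsigned int n_frags, unsigned int len,
			      unsigned int usr_buf_size,
			      unsigned int max_frags, bool scatter_enabled)
{
	if (n_frags == 1)
		return true;	/* checked against rx_buf->len instead */
	return scatter_enabled &&
	       n_frags <= max_frags &&
	       len > (n_frags - 1) * usr_buf_size &&
	       len <= n_frags * usr_buf_size;
}

In the driver, a failing check triggers a WARN unless it was an explicit zero-length discard request, and the packet is discarded either way.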
-static void efx_rx_deliver(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
+static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
{
struct sk_buff *skb;
+ u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
- /* We now own the SKB */
- skb = rx_buf->u.skb;
- rx_buf->u.skb = NULL;
+ skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
+ if (unlikely(skb == NULL)) {
+ efx_free_rx_buffer(rx_buf);
+ return;
+ }
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
/* Set the SKB flags */
skb_checksum_none_assert(skb);
- /* Record the rx_queue */
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
-
- /* Pass the packet up */
if (channel->type->receive_skb)
- channel->type->receive_skb(channel, skb);
- else
- netif_receive_skb(skb);
+ if (channel->type->receive_skb(channel, skb))
+ return;
- /* Update allocation strategy method */
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+ /* Pass the packet up */
+ netif_receive_skb(skb);
}
/* Handle a received packet. Second half: Touches packet payload. */
-void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
+void __efx_rx_packet(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- u8 *eh = efx_rx_buf_eh(efx, rx_buf);
+ struct efx_rx_buffer *rx_buf =
+ efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ u8 *eh = efx_rx_buf_va(rx_buf);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
efx_loopback_rx_packet(efx, eh, rx_buf->len);
- efx_free_rx_buffer(efx, rx_buf);
- return;
- }
-
- if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
- struct sk_buff *skb = rx_buf->u.skb;
-
- prefetch(skb_shinfo(skb));
-
- skb_reserve(skb, efx->type->rx_buffer_hash_size);
- skb_put(skb, rx_buf->len);
-
- if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(eh);
-
- /* Move past the ethernet header. rx_buf->data still points
- * at the ethernet header */
- skb->protocol = eth_type_trans(skb, efx->net_dev);
-
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
+ efx_free_rx_buffer(rx_buf);
+ goto out;
}
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
- if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
- !channel->type->receive_skb)
- efx_rx_packet_gro(channel, rx_buf, eh);
+ if (!channel->type->receive_skb)
+ efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
else
- efx_rx_deliver(channel, rx_buf);
-}
-
-void efx_rx_strategy(struct efx_channel *channel)
-{
- enum efx_rx_alloc_method method = rx_alloc_method;
-
- if (channel->type->receive_skb) {
- channel->rx_alloc_push_pages = false;
- return;
- }
-
- /* Only makes sense to use page based allocation if GRO is enabled */
- if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
- method = RX_ALLOC_METHOD_SKB;
- } else if (method == RX_ALLOC_METHOD_AUTO) {
- /* Constrain the rx_alloc_level */
- if (channel->rx_alloc_level < 0)
- channel->rx_alloc_level = 0;
- else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
- channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
-
- /* Decide on the allocation method */
- method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
- RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
- }
-
- /* Push the option */
- channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
+ efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
+out:
+ channel->rx_pkt_n_frags = 0;
}
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
@@ -683,9 +662,32 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
}
+
return rc;
}
+static void efx_init_rx_recycle_ring(struct efx_nic *efx,
+ struct efx_rx_queue *rx_queue)
+{
+ unsigned int bufs_in_recycle_ring, page_ring_size;
+
+ /* Set the RX recycle ring size */
+#ifdef CONFIG_PPC64
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+#else
+ if (efx->pci_dev->dev.iommu_group)
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+ else
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
+#endif /* CONFIG_PPC64 */
+
+ page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+ efx->rx_bufs_per_page);
+ rx_queue->page_ring = kcalloc(page_ring_size,
+ sizeof(*rx_queue->page_ring), GFP_KERNEL);
+ rx_queue->page_ptr_mask = page_ring_size - 1;
+}
+
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
@@ -699,10 +701,18 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->notified_count = 0;
rx_queue->removed_count = 0;
rx_queue->min_fill = -1U;
+ efx_init_rx_recycle_ring(efx, rx_queue);
+
+ rx_queue->page_remove = 0;
+ rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+ rx_queue->page_recycle_count = 0;
+ rx_queue->page_recycle_failed = 0;
+ rx_queue->page_recycle_full = 0;
/* Initialise limit fields */
max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
- max_trigger = max_fill - EFX_RX_BATCH;
+ max_trigger =
+ max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
if (rx_refill_threshold != 0) {
trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
if (trigger > max_trigger)
@@ -722,6 +732,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
int i;
+ struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
@@ -733,13 +744,32 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
- /* Release RX buffers NB start at index 0 not current HW ptr */
+ /* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
- for (i = 0; i <= rx_queue->ptr_mask; i++) {
- rx_buf = efx_rx_buffer(rx_queue, i);
+ for (i = rx_queue->removed_count; i < rx_queue->added_count;
+ i++) {
+ unsigned index = i & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
efx_fini_rx_buffer(rx_queue, rx_buf);
}
}
+
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+ struct efx_rx_page_state *state;
+
+ if (page == NULL)
+ continue;
+
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ kfree(rx_queue->page_ring);
+ rx_queue->page_ring = NULL;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -754,9 +784,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
}
-module_param(rx_alloc_method, int, 0644);
-MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
-
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index ba40f67e4f0..51669244d15 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -202,7 +202,7 @@ out:
static enum reset_type siena_map_reset_reason(enum reset_type reason)
{
- return RESET_TYPE_ALL;
+ return RESET_TYPE_RECOVER_OR_ALL;
}
static int siena_map_reset_flags(u32 *flags)
@@ -245,6 +245,22 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
return efx_mcdi_reset_port(efx);
}
+#ifdef CONFIG_EEH
+/* When a PCI device is isolated from the bus, a subsequent MMIO read is
+ * required for the kernel EEH mechanisms to notice. As the Solarflare driver
+ * was written to minimise MMIO read (for latency) then a periodic call to check
+ * the EEH status of the device is required so that device recovery can happen
+ * in a timely fashion.
+ */
+static void siena_monitor(struct efx_nic *efx)
+{
+ struct eeh_dev *eehdev =
+ of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
+
+ eeh_dev_check_failure(eehdev);
+}
+#endif
+
static int siena_probe_nvconfig(struct efx_nic *efx)
{
u32 caps = 0;
@@ -398,6 +414,8 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE,
+ EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
/* Set hash key for IPv4 */
@@ -665,7 +683,11 @@ const struct efx_nic_type siena_a0_nic_type = {
.init = siena_init_nic,
.dimension_resources = siena_dimension_resources,
.fini = efx_port_dummy_op_void,
+#ifdef CONFIG_EEH
+ .monitor = siena_monitor,
+#else
.monitor = NULL,
+#endif
.map_reset_reason = siena_map_reset_reason,
.map_reset_flags = siena_map_reset_flags,
.reset = siena_reset_hw,
@@ -698,6 +720,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
+ .can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 79ad9c94a21..4bdbaad9932 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -213,10 +213,11 @@ static int meth_init_tx_ring(struct meth_private *priv)
{
/* Init TX ring */
priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
- &priv->tx_ring_dma, GFP_ATOMIC);
+ &priv->tx_ring_dma,
+ GFP_ATOMIC | __GFP_ZERO);
if (!priv->tx_ring)
return -ENOMEM;
- memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
+
priv->tx_count = priv->tx_read = priv->tx_write = 0;
mace->eth.tx_ring_base = priv->tx_ring_dma;
/* Now init skb save area */
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index efca14eaefa..e45829628d5 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1841,15 +1841,12 @@ refill_rx_ring:
entry = sis_priv->dirty_rx % NUM_RX_DESC;
if (sis_priv->rx_skbuff[entry] == NULL) {
- if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
+ skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
+ if (skb == NULL) {
/* not enough memory for skbuff, this makes a
* "hole" on the buffer ring, it is not clear
* how the hardware will react to this kind
* of degenerated buffer */
- if (netif_msg_rx_err(sis_priv))
- printk(KERN_INFO "%s: Memory squeeze, "
- "deferring packet.\n",
- net_dev->name);
net_dev->stats.rx_dropped++;
break;
}
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 50823da9dc1..e85c2e7e824 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1223,9 +1223,7 @@ static void smc_rcv(struct net_device *dev)
dev->stats.multicast++;
skb = netdev_alloc_skb(dev, packet_length + 5);
-
if ( skb == NULL ) {
- printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
dev->stats.rx_dropped++;
goto done;
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 591650a8de3..dfbf978315d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -465,8 +465,6 @@ static inline void smc_rcv(struct net_device *dev)
*/
skb = netdev_alloc_skb(dev, packet_len);
if (unlikely(skb == NULL)) {
- printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
- dev->name);
SMC_WAIT_MMU_BUSY(lp);
SMC_SET_MMU_CMD(lp, MC_RELEASE);
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index da5cc9a3b34..48e2b99bec5 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2115,7 +2115,7 @@ static int smsc911x_init(struct net_device *dev)
spin_lock_init(&pdata->dev_lock);
spin_lock_init(&pdata->mac_lock);
- if (pdata->ioaddr == 0) {
+ if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
return -ENODEV;
}
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index d457fa2d750..ffa5c4ad121 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -848,10 +848,8 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
BUG_ON(pd->rx_buffers[index].skb);
BUG_ON(pd->rx_buffers[index].mapping);
- if (unlikely(!skb)) {
- smsc_warn(RX_ERR, "Failed to allocate new skb!");
+ if (unlikely(!skb))
return -ENOMEM;
- }
mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 39c6c552463..d02b446037d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -534,25 +534,17 @@ static void init_dma_desc_rings(struct net_device *dev)
GFP_KERNEL);
priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
GFP_KERNEL);
- priv->dma_rx =
- (struct dma_desc *)dma_alloc_coherent(priv->device,
- rxsize *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
+ priv->dma_rx = dma_alloc_coherent(priv->device,
+ rxsize * sizeof(struct dma_desc),
+ &priv->dma_rx_phy, GFP_KERNEL);
priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
GFP_KERNEL);
- priv->dma_tx =
- (struct dma_desc *)dma_alloc_coherent(priv->device,
- txsize *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
-
- if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
- pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
+ priv->dma_tx = dma_alloc_coherent(priv->device,
+ txsize * sizeof(struct dma_desc),
+ &priv->dma_tx_phy, GFP_KERNEL);
+
+ if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL))
return;
- }
DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, "
"Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 5fafca06530..054975939a1 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1169,10 +1169,8 @@ static int bigmac_ether_init(struct platform_device *op,
bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
PAGE_SIZE,
&bp->bblock_dvma, GFP_ATOMIC);
- if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
- printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
+ if (bp->bmac_block == NULL || bp->bblock_dvma == 0)
goto fail_and_cleanup;
- }
/* Get the board revision of this BigMAC. */
bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index a1bff49a815..436fa9d5a07 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2752,10 +2752,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
&hp->hblock_dvma,
GFP_ATOMIC);
err = -ENOMEM;
- if (!hp->happy_block) {
- printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
+ if (!hp->happy_block)
goto err_out_iounmap;
- }
/* Force check of the link first time we are brought up. */
hp->linkcheck = 0;
@@ -3068,14 +3066,11 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
hp->happy_bursts = DMA_BURSTBITS;
#endif
- hp->happy_block = (struct hmeal_init_block *)
- dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL);
-
+ hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &hp->hblock_dvma, GFP_KERNEL);
err = -ENODEV;
- if (!hp->happy_block) {
- printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
+ if (!hp->happy_block)
goto err_out_iounmap;
- }
hp->linkcheck = 0;
hp->timer_state = asleep;
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 49bf3e2eb65..8182591bc18 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -414,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
struct qe_rxd *this;
struct sunqe_buffers *qbufs = qep->buffers;
__u32 qbufs_dvma = qep->buffers_dvma;
- int elem = qep->rx_new, drops = 0;
+ int elem = qep->rx_new;
u32 flags;
this = &rxbase[elem];
@@ -436,7 +436,6 @@ static void qe_rx(struct sunqe *qep)
} else {
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- drops++;
dev->stats.rx_dropped++;
} else {
skb_reserve(skb, 2);
@@ -456,8 +455,6 @@ static void qe_rx(struct sunqe *qep)
this = &rxbase[elem];
}
qep->rx_new = elem;
- if (drops)
- printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}
static void qe_tx_reclaim(struct sunqe *qep);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index e15cc71b826..e8824cea093 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1102,10 +1102,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
dno = bdx_rxdb_available(db) - 1;
while (dno > 0) {
skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
- if (!skb) {
- pr_err("NO MEM: netdev_alloc_skb failed\n");
+ if (!skb)
break;
- }
+
skb_reserve(skb, NET_IP_ALIGN);
idx = bdx_rxdb_alloc_elem(db);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 75c48558e6f..5ceaa4c3cfa 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -126,6 +126,13 @@ do { \
#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
+#define CPSW_INTPACEEN (0x3f << 16)
+#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
+#define CPSW_CMINTMAX_CNT 63
+#define CPSW_CMINTMIN_CNT 2
+#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
+#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
+
#define cpsw_enable_irq(priv) \
do { \
u32 i; \
@@ -139,6 +146,10 @@ do { \
disable_irq_nosync(priv->irqs_table[i]); \
} while (0);
+#define cpsw_slave_index(priv) \
+ ((priv->data.dual_emac) ? priv->emac_port : \
+ priv->data.active_slave)
+
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
@@ -160,6 +171,15 @@ struct cpsw_wr_regs {
u32 rx_en;
u32 tx_en;
u32 misc_en;
+ u32 mem_allign1[8];
+ u32 rx_thresh_stat;
+ u32 rx_stat;
+ u32 tx_stat;
+ u32 misc_stat;
+ u32 mem_allign2[8];
+ u32 rx_imax;
+ u32 tx_imax;
+
};
struct cpsw_ss_regs {
@@ -314,6 +334,8 @@ struct cpsw_priv {
struct cpsw_host_regs __iomem *host_port_regs;
u32 msg_enable;
u32 version;
+ u32 coal_intvl;
+ u32 bus_freq_mhz;
struct net_device_stats stats;
int rx_packet_max;
int host_port;
@@ -612,6 +634,77 @@ static void cpsw_adjust_link(struct net_device *ndev)
}
}
+static int cpsw_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ coal->rx_coalesce_usecs = priv->coal_intvl;
+ return 0;
+}
+
+static int cpsw_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ u32 int_ctrl;
+ u32 num_interrupts = 0;
+ u32 prescale = 0;
+ u32 addnl_dvdr = 1;
+ u32 coal_intvl = 0;
+
+ if (!coal->rx_coalesce_usecs)
+ return -EINVAL;
+
+ coal_intvl = coal->rx_coalesce_usecs;
+
+ int_ctrl = readl(&priv->wr_regs->int_control);
+ prescale = priv->bus_freq_mhz * 4;
+
+ if (coal_intvl < CPSW_CMINTMIN_INTVL)
+ coal_intvl = CPSW_CMINTMIN_INTVL;
+
+ if (coal_intvl > CPSW_CMINTMAX_INTVL) {
+ /* The interrupt pacer works with a 4us pulse; we can
+ * throttle further by dilating the 4us pulse.
+ */
+ addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
+
+ if (addnl_dvdr > 1) {
+ prescale *= addnl_dvdr;
+ if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
+ coal_intvl = (CPSW_CMINTMAX_INTVL
+ * addnl_dvdr);
+ } else {
+ addnl_dvdr = 1;
+ coal_intvl = CPSW_CMINTMAX_INTVL;
+ }
+ }
+
+ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+ writel(num_interrupts, &priv->wr_regs->rx_imax);
+ writel(num_interrupts, &priv->wr_regs->tx_imax);
+
+ int_ctrl |= CPSW_INTPACEEN;
+ int_ctrl &= (~CPSW_INTPRESCALE_MASK);
+ int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
+ writel(int_ctrl, &priv->wr_regs->int_control);
+
+ cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
+ if (priv->data.dual_emac) {
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ priv = netdev_priv(priv->slaves[i].ndev);
+ priv->coal_intvl = coal_intvl;
+ }
+ } else {
+ priv->coal_intvl = coal_intvl;
+ }
+
+ return 0;
+}
+
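
To sanity-check the pacing arithmetic above, a small worked example, assuming a 125 MHz bus clock and a requested 500us interval (illustrative numbers; the driver takes the clock from clk_get_rate() and the interval from ethtool):

#include <stdio.h>

int main(void)
{
	/* Assumed inputs; constants mirror CPSW_CMINT*_CNT above */
	unsigned int bus_freq_mhz = 125, coal_intvl = 500;
	unsigned int cmintmax_cnt = 63, cmintmin_cnt = 2;
	unsigned int max_intvl = 1000 / cmintmin_cnt;		/* 500us */
	unsigned int min_intvl = (1000 / cmintmax_cnt) + 1;	/* 16us */
	unsigned int prescale = bus_freq_mhz * 4;	/* clocks per 4us pulse */
	unsigned int addnl_dvdr = 1;			/* no dilation here */

	if (coal_intvl < min_intvl)
		coal_intvl = min_intvl;
	if (coal_intvl > max_intvl)
		coal_intvl = max_intvl;

	/* rx_imax/tx_imax bound interrupts per millisecond; prints
	 * "prescale 500, imax 2" for these inputs.
	 */
	printf("prescale %u, imax %u\n", prescale,
	       (1000 * addnl_dvdr) / coal_intvl);
	return 0;
}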
static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
{
static char *leader = "........................................";
@@ -834,6 +927,14 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
}
+ /* Enable Interrupt pacing if configured */
+ if (priv->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ cpsw_set_coalesce(ndev, &coal);
+ }
+
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
napi_enable(&priv->napi);
@@ -942,7 +1043,7 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
- struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
u32 ts_en, seq_id;
if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
@@ -971,7 +1072,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->data.dual_emac)
slave = &priv->slaves[priv->emac_port];
else
- slave = &priv->slaves[priv->data.cpts_active_slave];
+ slave = &priv->slaves[priv->data.active_slave];
ctrl = slave_read(slave, CPSW2_CONTROL);
ctrl &= ~CTRL_ALL_TS_MASK;
@@ -1056,14 +1157,26 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(req);
+ int slave_no = cpsw_slave_index(priv);
+
if (!netif_running(dev))
return -EINVAL;
+ switch (cmd) {
#ifdef CONFIG_TI_CPTS
- if (cmd == SIOCSHWTSTAMP)
+ case SIOCSHWTSTAMP:
return cpsw_hwtstamp_ioctl(dev, req);
#endif
- return -ENOTSUPP;
+ case SIOCGMIIPHY:
+ data->phy_id = priv->slaves[slave_no].phy->addr;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1244,12 +1357,39 @@ static int cpsw_get_ts_info(struct net_device *ndev,
return 0;
}
+static int cpsw_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ if (priv->slaves[slave_no].phy)
+ return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ if (priv->slaves[slave_no].phy)
+ return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ts_info = cpsw_get_ts_info,
+ .get_settings = cpsw_get_settings,
+ .set_settings = cpsw_set_settings,
+ .get_coalesce = cpsw_get_coalesce,
+ .set_coalesce = cpsw_set_coalesce,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1282,12 +1422,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->slaves = prop;
- if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
- pr_err("Missing cpts_active_slave property in the DT.\n");
+ if (of_property_read_u32(node, "active_slave", &prop)) {
+ pr_err("Missing active_slave property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->cpts_active_slave = prop;
+ data->active_slave = prop;
if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
pr_err("Missing cpts_clock_mult property in the DT.\n");
@@ -1437,6 +1577,9 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->slaves = priv->slaves;
priv_sl2->clk = priv->clk;
+ priv_sl2->coal_intvl = 0;
+ priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
+
priv_sl2->cpsw_res = priv->cpsw_res;
priv_sl2->regs = priv->regs;
priv_sl2->host_port = priv->host_port;
@@ -1546,6 +1689,8 @@ static int cpsw_probe(struct platform_device *pdev)
ret = -ENODEV;
goto clean_slave_ret;
}
+ priv->coal_intvl = 0;
+ priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!priv->cpsw_res) {
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 22725386c5d..bdda36f8e54 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1911,10 +1911,8 @@ static void tlan_reset_lists(struct net_device *dev)
list->frame_size = TLAN_MAX_FRAME_SIZE;
list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
- if (!skb) {
- netdev_err(dev, "Out of memory for received data\n");
+ if (!skb)
break;
- }
list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data,
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index f1b91fd7e41..fef6b59e69c 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -352,8 +352,7 @@ spider_net_init_chain(struct spider_net_card *card,
alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
- &chain->dma_addr, GFP_KERNEL);
-
+ &chain->dma_addr, GFP_KERNEL);
if (!chain->hwring)
return -ENOMEM;
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 8fa947a2d92..3c69a046083 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1308,27 +1308,16 @@ static int tsi108_open(struct net_device *dev)
data->id, dev->irq, dev->name);
}
- data->rxring = dma_alloc_coherent(NULL, rxring_size,
- &data->rxdma, GFP_KERNEL);
-
- if (!data->rxring) {
- printk(KERN_DEBUG
- "TSI108_ETH: failed to allocate memory for rxring!\n");
+ data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!data->rxring)
return -ENOMEM;
- } else {
- memset(data->rxring, 0, rxring_size);
- }
-
- data->txring = dma_alloc_coherent(NULL, txring_size,
- &data->txdma, GFP_KERNEL);
+ data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma,
+ GFP_KERNEL | __GFP_ZERO);
if (!data->txring) {
- printk(KERN_DEBUG
- "TSI108_ETH: failed to allocate memory for txring!\n");
pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
return -ENOMEM;
- } else {
- memset(data->txring, 0, txring_size);
}
for (i = 0; i < TSI108_RXRING_LEN; i++) {
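The tsi108 hunk above is the first of many in this series that fold the follow-up memset() into the allocation itself via __GFP_ZERO. The transformation in isolation, as a sketch (the device pointer and size are placeholders):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *alloc_zeroed_ring(struct device *dev, size_t size,
                               dma_addr_t *dma)
{
        /* one call now replaces dma_alloc_coherent() + memset(ptr, 0, size) */
        return dma_alloc_coherent(dev, size, dma, GFP_KERNEL | __GFP_ZERO);
}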
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9fc2ada4c3c..4a7c60f4c83 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -245,39 +245,30 @@ static int temac_dma_bd_init(struct net_device *ndev)
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
- if (!lp->tx_bd_v) {
- dev_err(&ndev->dev,
- "unable to allocate DMA TX buffer descriptors");
+ &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ if (!lp->tx_bd_v)
goto out;
- }
+
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
- if (!lp->rx_bd_v) {
- dev_err(&ndev->dev,
- "unable to allocate DMA RX buffer descriptors");
+ &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ if (!lp->rx_bd_v)
goto out;
- }
- memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
}
- memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
for (i = 0; i < RX_BD_NUM; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
-
- if (skb == 0) {
- dev_err(&ndev->dev, "alloc_skb error %d\n", i);
+ if (!skb)
goto out;
- }
+
lp->rx_skb[i] = skb;
/* returns physical address of skb->data */
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
@@ -789,9 +780,7 @@ static void ll_temac_recv(struct net_device *ndev)
new_skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
-
- if (new_skb == 0) {
- dev_err(&ndev->dev, "no memory for new sk_buff\n");
+ if (!new_skb) {
spin_unlock_irqrestore(&lp->rx_lock, flags);
return;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 278c9db3b5b..24748e8367a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -204,41 +204,31 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p,
- GFP_KERNEL);
- if (!lp->tx_bd_v) {
- dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
- "descriptors");
+ GFP_KERNEL | __GFP_ZERO);
+ if (!lp->tx_bd_v)
goto out;
- }
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM,
&lp->rx_bd_p,
- GFP_KERNEL);
- if (!lp->rx_bd_v) {
- dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
- "descriptors");
+ GFP_KERNEL | __GFP_ZERO);
+ if (!lp->rx_bd_v)
goto out;
- }
- memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) *
((i + 1) % TX_BD_NUM);
}
- memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
for (i = 0; i < RX_BD_NUM; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) *
((i + 1) % RX_BD_NUM);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!skb) {
- dev_err(&ndev->dev, "alloc_skb error %d\n", i);
+ if (!skb)
goto out;
- }
lp->rx_bd_v[i].sw_id_offset = (u32) skb;
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
@@ -777,10 +767,9 @@ static void axienet_recv(struct net_device *ndev)
packets++;
new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!new_skb) {
- dev_err(&ndev->dev, "no memory for new sk_buff\n");
+ if (!new_skb)
return;
- }
+
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 98e09d0d3ce..76210abf2e9 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1041,7 +1041,6 @@ xirc2ps_interrupt(int irq, void *dev_id)
/* 1 extra so we can use insw */
skb = netdev_alloc_skb(dev, pktlen + 3);
if (!skb) {
- pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
dev->stats.rx_dropped++;
} else { /* okay get the packet */
skb_reserve(skb, 2);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 502c8ff1d98..4c8ddc944d5 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1070,13 +1070,10 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
(PI_ALIGN_K_DESC_BLK - 1);
bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
&bp->kmalloced_dma,
- GFP_ATOMIC);
- if (top_v == NULL) {
- printk("%s: Could not allocate memory for host buffers "
- "and structures!\n", print_name);
+ GFP_ATOMIC | __GFP_ZERO);
+ if (top_v == NULL)
return DFX_K_FAILURE;
- }
- memset(top_v, 0, alloc_size); /* zero out memory before continuing */
+
top_p = bp->kmalloced_dma; /* get physical address of buffer */
/*
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 3f2c7aaf28c..0ca8f88ac53 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -22,6 +22,7 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
#include <net/wpan-phy.h>
#include <net/mac802154.h>
@@ -91,9 +92,8 @@ struct mrf24j40 {
#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5)
#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
-/* Maximum speed to run the device at. TODO: Get the real max value from
- * someone at Microchip since it isn't in the datasheet. */
-#define MAX_SPI_SPEED_HZ 1000000
+/* The datasheet indicates the theoretical maximum for SCK to be 10MHz */
+#define MAX_SPI_SPEED_HZ 10000000
#define printdev(X) (&X->spi->dev)
@@ -361,6 +361,7 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
if (ret == -ERESTARTSYS)
goto err;
if (ret == 0) {
+ dev_warn(printdev(devrec), "Timeout waiting for TX interrupt\n");
ret = -ETIMEDOUT;
goto err;
}
@@ -477,7 +478,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
int i;
for (i = 0; i < 8; i++)
write_short_reg(devrec, REG_EADR0+i,
- filt->ieee_addr[i]);
+ filt->ieee_addr[7-i]);
#ifdef DEBUG
printk(KERN_DEBUG "Set long addr to: ");
@@ -623,6 +624,7 @@ static int mrf24j40_probe(struct spi_device *spi)
int ret = -ENOMEM;
u8 val;
struct mrf24j40 *devrec;
+ struct pinctrl *pinctrl;
printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
@@ -633,6 +635,11 @@ static int mrf24j40_probe(struct spi_device *spi)
if (!devrec->buf)
goto err_buf;
+ pinctrl = devm_pinctrl_get_select_default(&spi->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&spi->dev,
+ "pinctrl pins are not configured from the driver");
+
spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
spi->max_speed_hz = MAX_SPI_SPEED_HZ;
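The ieee_addr[7-i] change above reverses the byte order written to EADR0..EADR7. A standalone illustration of that fix, assuming the stack hands the driver the EUI-64 most-significant byte first:

#include <linux/types.h>

/* Illustration only: register block byte i gets EUI byte 7 - i,
 * i.e. the long address is programmed least-significant byte first. */
static void mrf24j40_eui_to_regs(u8 regs[8], const u8 ieee_addr[8])
{
        int i;

        for (i = 0; i < 8; i++)
                regs[i] = ieee_addr[7 - i];
}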
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 9cea451a608..3adb43ce138 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -352,21 +352,19 @@ static int ali_ircc_open(int i, chipio_t *info)
/* Allocate memory if needed */
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL);
+ &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
- memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL);
+ &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
}
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index b5151e4ced6..56f1e6d6e4e 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/types.h>
+#include <linux/ioport.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
@@ -882,12 +883,12 @@ static int au1k_irda_probe(struct platform_device *pdev)
goto out;
err = -EBUSY;
- aup->ioarea = request_mem_region(r->start, r->end - r->start + 1,
+ aup->ioarea = request_mem_region(r->start, resource_size(r),
pdev->name);
if (!aup->ioarea)
goto out;
- aup->iobase = ioremap_nocache(r->start, r->end - r->start + 1);
+ aup->iobase = ioremap_nocache(r->start, resource_size(r));
if (!aup->iobase)
goto out2;
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index fed4a05d55c..a06fca61c9a 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -389,7 +389,8 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);
- port->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
+ port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
+ &dma_handle, GFP_DMA);
port->rx_dma_buf.head = 0;
port->rx_dma_buf.tail = 0;
port->rx_dma_nrows = 0;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 2a4f2f15324..9cf836b57c4 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -431,22 +431,20 @@ static int __init nsc_ircc_open(chipio_t *info)
/* Allocate memory if needed */
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL);
+ &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto out2;
}
- memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL);
+ &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto out3;
}
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 858de05bdb7..964b116a0ab 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -700,12 +700,12 @@ static int pxa_irda_start(struct net_device *dev)
err = -ENOMEM;
si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
- &si->dma_rx_buff_phy, GFP_KERNEL );
+ &si->dma_rx_buff_phy, GFP_KERNEL);
if (!si->dma_rx_buff)
goto err_dma_rx_buff;
si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
- &si->dma_tx_buff_phy, GFP_KERNEL );
+ &si->dma_tx_buff_phy, GFP_KERNEL);
if (!si->dma_tx_buff)
goto err_dma_tx_buff;
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 5290952b60c..aa05dad7533 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -563,24 +563,15 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL);
- if (self->rx_buff.head == NULL) {
- IRDA_ERROR("%s, Can't allocate memory for receive buffer!\n",
- driver_name);
+ &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ if (self->rx_buff.head == NULL)
goto err_out2;
- }
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL);
- if (self->tx_buff.head == NULL) {
- IRDA_ERROR("%s, Can't allocate memory for transmit buffer!\n",
- driver_name);
+ &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ if (self->tx_buff.head == NULL)
goto err_out3;
- }
-
- memset(self->rx_buff.head, 0, self->rx_buff.truesize);
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index f9033c6a888..51f2bc37610 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -364,21 +364,19 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
/* Allocate memory if needed */
self->rx_buff.head =
dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL);
+ &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
- memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL);
+ &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
}
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index f5bb92f1588..bb8857a158a 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -216,22 +216,19 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
/* Allocate memory if needed */
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL);
+ &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out1;
}
- memset(self->rx_buff.head, 0, self->rx_buff.truesize);
-
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL);
+ &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index ec40ba882f6..ff2e45e9cb5 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -159,7 +159,7 @@ static int lxt973a2_update_link(struct phy_device *phydev)
return 0;
}
-int lxt973a2_read_status(struct phy_device *phydev)
+static int lxt973a2_read_status(struct phy_device *phydev)
{
int adv;
int err;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 22dec9c7ef0..202fe1ff198 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -7,6 +7,8 @@
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*
+ * Copyright (c) 2013 Michael Stapelberg <michael@stapelberg.de>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -80,6 +82,28 @@
#define MII_88E1318S_PHY_MSCR1_REG 16
#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
+/* Copper Specific Interrupt Enable Register */
+#define MII_88E1318S_PHY_CSIER 0x12
+/* WOL Event Interrupt Enable */
+#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7)
+
+/* LED Timer Control Register */
+#define MII_88E1318S_PHY_LED_PAGE 0x03
+#define MII_88E1318S_PHY_LED_TCR 0x12
+#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15)
+#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7)
+#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11)
+
+/* Magic Packet MAC address registers */
+#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17
+#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18
+#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19
+
+#define MII_88E1318S_PHY_WOL_PAGE 0x11
+#define MII_88E1318S_PHY_WOL_CTRL 0x10
+#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
+#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
+
#define MII_88E1121_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_PAGE 3
#define MII_88E1121_PHY_LED_DEF 0x0030
@@ -696,6 +720,107 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
return 0;
}
+static void m88e1318_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ if (phy_write(phydev, MII_MARVELL_PHY_PAGE,
+ MII_88E1318S_PHY_WOL_PAGE) < 0)
+ return;
+
+ if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
+ MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00) < 0)
+ return;
+}
+
+static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+ int err, oldpage, temp;
+
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* Explicitly switch to page 0x00, just to be sure */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00);
+ if (err < 0)
+ return err;
+
+ /* Enable the WOL interrupt */
+ temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
+ temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
+ err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
+ MII_88E1318S_PHY_LED_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Setup LED[2] as interrupt pin (active low) */
+ temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR);
+ temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT;
+ temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE;
+ temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW;
+ err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
+ MII_88E1318S_PHY_WOL_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Store the device address for the magic packet */
+ err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
+ ((phydev->attached_dev->dev_addr[5] << 8) |
+ phydev->attached_dev->dev_addr[4]));
+ if (err < 0)
+ return err;
+ err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
+ ((phydev->attached_dev->dev_addr[3] << 8) |
+ phydev->attached_dev->dev_addr[2]));
+ if (err < 0)
+ return err;
+ err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
+ ((phydev->attached_dev->dev_addr[1] << 8) |
+ phydev->attached_dev->dev_addr[0]));
+ if (err < 0)
+ return err;
+
+ /* Clear WOL status and enable magic packet matching */
+ temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
+ temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
+ temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
+ err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+ if (err < 0)
+ return err;
+ } else {
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
+ MII_88E1318S_PHY_WOL_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Clear WOL status and disable magic packet matching */
+ temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
+ temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
+ temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
+ err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+ if (err < 0)
+ return err;
+ }
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
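m88e1318_set_wol() above repeats one idiom several times: select a register page, read-modify-write a register, then restore the page. A hedged helper sketch capturing that pattern (not part of the patch; it reuses the MII_MARVELL_PHY_PAGE register and phy_read()/phy_write() calls seen above):

static int m88e1318_set_bits_paged(struct phy_device *phydev,
                                   int page, int reg, u16 bits)
{
        int err, temp;

        err = phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
        if (err < 0)
                return err;

        temp = phy_read(phydev, reg);
        if (temp < 0)
                return temp;

        err = phy_write(phydev, reg, temp | bits);
        if (err < 0)
                return err;

        /* restore page 0, as the patch does via oldpage */
        return phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00);
}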
+
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@@ -772,6 +897,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .get_wol = &m88e1318_get_wol,
+ .set_wol = &m88e1318_set_wol,
.driver = { .owner = THIS_MODULE },
},
{
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index abf7b6153d0..2510435f34e 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -53,6 +53,18 @@
#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
+static int ksz_config_flags(struct phy_device *phydev)
+{
+ int regval;
+
+ if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+ regval = phy_read(phydev, MII_KSZPHY_CTRL);
+ regval |= KSZ8051_RMII_50MHZ_CLK;
+ return phy_write(phydev, MII_KSZPHY_CTRL, regval);
+ }
+ return 0;
+}
+
static int kszphy_ack_interrupt(struct phy_device *phydev)
{
/* bit[7..0] int status, which is a read and clear register. */
@@ -114,22 +126,19 @@ static int kszphy_config_init(struct phy_device *phydev)
static int ksz8021_config_init(struct phy_device *phydev)
{
+ int rc;
const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
phy_write(phydev, MII_KSZPHY_OMSO, val);
- return 0;
+ rc = ksz_config_flags(phydev);
+ return rc < 0 ? rc : 0;
}
static int ks8051_config_init(struct phy_device *phydev)
{
- int regval;
-
- if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
- regval = phy_read(phydev, MII_KSZPHY_CTRL);
- regval |= KSZ8051_RMII_50MHZ_CLK;
- phy_write(phydev, MII_KSZPHY_CTRL, regval);
- }
+ int rc;
- return 0;
+ rc = ksz_config_flags(phydev);
+ return rc < 0 ? rc : 0;
}
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
@@ -192,6 +201,19 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = PHY_ID_KSZ8031,
+ .phy_id_mask = 0x00ffffff,
+ .name = "Micrel KSZ8031",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = ksz8021_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = PHY_ID_KSZ8041,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KSZ8041",
@@ -325,6 +347,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8001, 0x00ffffff },
{ PHY_ID_KS8737, 0x00fffff0 },
{ PHY_ID_KSZ8021, 0x00ffffff },
+ { PHY_ID_KSZ8031, 0x00ffffff },
{ PHY_ID_KSZ8041, 0x00fffff0 },
{ PHY_ID_KSZ8051, 0x00fffff0 },
{ PHY_ID_KSZ8061, 0x00fffff0 },
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef9ea924822..298b4c20173 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1188,3 +1188,19 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
+
+int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+ if (phydev->drv->set_wol)
+ return phydev->drv->set_wol(phydev, wol);
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(phy_ethtool_set_wol);
+
+void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+ if (phydev->drv->get_wol)
+ phydev->drv->get_wol(phydev, wol);
+}
+EXPORT_SYMBOL(phy_ethtool_get_wol);
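A MAC driver would typically wire these new helpers into its own ethtool ops; a minimal sketch of that delegation, where foo_priv and its phydev member are assumptions for illustration:

static void foo_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct foo_priv *priv = netdev_priv(ndev);

        wol->supported = 0;
        wol->wolopts = 0;
        if (priv->phydev)
                phy_ethtool_get_wol(priv->phydev, wol);
}

static int foo_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct foo_priv *priv = netdev_priv(ndev);

        if (!priv->phydev)
                return -ENODEV;
        return phy_ethtool_set_wol(priv->phydev, wol);
}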
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2585c383e62..3492b539127 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -61,7 +61,7 @@ MODULE_DESCRIPTION("Vitesse PHY driver");
MODULE_AUTHOR("Kriston Carson");
MODULE_LICENSE("GPL");
-int vsc824x_add_skew(struct phy_device *phydev)
+static int vsc824x_add_skew(struct phy_device *phydev)
{
int err;
int extcon;
@@ -81,7 +81,6 @@ int vsc824x_add_skew(struct phy_device *phydev)
return err;
}
-EXPORT_SYMBOL(vsc824x_add_skew);
static int vsc824x_config_init(struct phy_device *phydev)
{
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index c3011af68e9..c853d84fd99 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -37,6 +37,18 @@ config NET_TEAM_MODE_ROUNDROBIN
To compile this team mode as a module, choose M here: the module
will be called team_mode_roundrobin.
+config NET_TEAM_MODE_RANDOM
+ tristate "Random mode support"
+ depends on NET_TEAM
+ ---help---
+ Basic mode where the port used for transmitting packets is
+ selected randomly.
+
+ All added ports are set up to use the team's device address.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_random.
+
config NET_TEAM_MODE_ACTIVEBACKUP
tristate "Active-backup mode support"
depends on NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index 975763014e5..c57e8588975 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -5,5 +5,6 @@
obj-$(CONFIG_NET_TEAM) += team.o
obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o
obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
+obj-$(CONFIG_NET_TEAM_MODE_RANDOM) += team_mode_random.o
obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bf341929787..621c1bddeee 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -73,11 +73,24 @@ static int team_port_set_orig_dev_addr(struct team_port *port)
return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}
-int team_port_set_team_dev_addr(struct team_port *port)
+static int team_port_set_team_dev_addr(struct team *team,
+ struct team_port *port)
+{
+ return __set_port_dev_addr(port->dev, team->dev->dev_addr);
+}
+
+int team_modeop_port_enter(struct team *team, struct team_port *port)
+{
+ return team_port_set_team_dev_addr(team, port);
+}
+EXPORT_SYMBOL(team_modeop_port_enter);
+
+void team_modeop_port_change_dev_addr(struct team *team,
+ struct team_port *port)
{
- return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
+ team_port_set_team_dev_addr(team, port);
}
-EXPORT_SYMBOL(team_port_set_team_dev_addr);
+EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
static void team_refresh_port_linkup(struct team_port *port)
{
@@ -490,9 +503,9 @@ static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
return false;
}
-rx_handler_result_t team_dummy_receive(struct team *team,
- struct team_port *port,
- struct sk_buff *skb)
+static rx_handler_result_t team_dummy_receive(struct team *team,
+ struct team_port *port,
+ struct sk_buff *skb)
{
return RX_HANDLER_ANOTHER;
}
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index c5db428e73f..c366cd299c0 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -46,20 +46,10 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
return sum_ret;
}
-static int bc_port_enter(struct team *team, struct team_port *port)
-{
- return team_port_set_team_dev_addr(port);
-}
-
-static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
-{
- team_port_set_team_dev_addr(port);
-}
-
static const struct team_mode_ops bc_mode_ops = {
.transmit = bc_transmit,
- .port_enter = bc_port_enter,
- .port_change_dev_addr = bc_port_change_dev_addr,
+ .port_enter = team_modeop_port_enter,
+ .port_change_dev_addr = team_modeop_port_change_dev_addr,
};
static const struct team_mode bc_mode = {
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
new file mode 100644
index 00000000000..9eabfaa22f3
--- /dev/null
+++ b/drivers/net/team/team_mode_random.c
@@ -0,0 +1,71 @@
+/*
+ * drivers/net/team/team_mode_random.c - Random mode for team
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/reciprocal_div.h>
+#include <linux/if_team.h>
+
+static u32 random_N(unsigned int N)
+{
+ return reciprocal_divide(random32(), N);
+}
+
+static bool rnd_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *port;
+ int port_index;
+
+ port_index = random_N(team->en_port_count);
+ port = team_get_port_by_index_rcu(team, port_index);
+ port = team_get_first_port_txable_rcu(team, port);
+ if (unlikely(!port))
+ goto drop;
+ if (team_dev_queue_xmit(team, port, skb))
+ return false;
+ return true;
+
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static const struct team_mode_ops rnd_mode_ops = {
+ .transmit = rnd_transmit,
+ .port_enter = team_modeop_port_enter,
+ .port_change_dev_addr = team_modeop_port_change_dev_addr,
+};
+
+static const struct team_mode rnd_mode = {
+ .kind = "random",
+ .owner = THIS_MODULE,
+ .ops = &rnd_mode_ops,
+};
+
+static int __init rnd_init_module(void)
+{
+ return team_mode_register(&rnd_mode);
+}
+
+static void __exit rnd_cleanup_module(void)
+{
+ team_mode_unregister(&rnd_mode);
+}
+
+module_init(rnd_init_module);
+module_exit(rnd_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_DESCRIPTION("Random mode for team");
+MODULE_ALIAS("team-mode-random");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 105135aa8f0..d268e4de781 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -25,26 +25,6 @@ static struct rr_priv *rr_priv(struct team *team)
return (struct rr_priv *) &team->mode_priv;
}
-static struct team_port *__get_first_port_up(struct team *team,
- struct team_port *port)
-{
- struct team_port *cur;
-
- if (team_port_txable(port))
- return port;
- cur = port;
- list_for_each_entry_continue_rcu(cur, &team->port_list, list)
- if (team_port_txable(port))
- return cur;
- list_for_each_entry_rcu(cur, &team->port_list, list) {
- if (cur == port)
- break;
- if (team_port_txable(port))
- return cur;
- }
- return NULL;
-}
-
static bool rr_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *port;
@@ -52,7 +32,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
port = team_get_port_by_index_rcu(team, port_index);
- port = __get_first_port_up(team, port);
+ port = team_get_first_port_txable_rcu(team, port);
if (unlikely(!port))
goto drop;
if (team_dev_queue_xmit(team, port, skb))
@@ -64,20 +44,10 @@ drop:
return false;
}
-static int rr_port_enter(struct team *team, struct team_port *port)
-{
- return team_port_set_team_dev_addr(port);
-}
-
-static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
-{
- team_port_set_team_dev_addr(port);
-}
-
static const struct team_mode_ops rr_mode_ops = {
.transmit = rr_transmit,
- .port_enter = rr_port_enter,
- .port_change_dev_addr = rr_port_change_dev_addr,
+ .port_enter = team_modeop_port_enter,
+ .port_change_dev_addr = team_modeop_port_change_dev_addr,
};
static const struct team_mode rr_mode = {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b7c457adc0d..95837c1b197 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -409,14 +409,12 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
struct tun_struct *tun;
- struct net_device *dev;
tun = rtnl_dereference(tfile->tun);
if (tun && !tfile->detached) {
u16 index = tfile->queue_index;
BUG_ON(index >= tun->numqueues);
- dev = tun->dev;
rcu_assign_pointer(tun->tfiles[index],
tun->tfiles[tun->numqueues - 1]);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7cee7a3068e..33427fd6251 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -81,13 +81,22 @@ struct vxlan_net {
struct hlist_head vni_list[VNI_HASH_SIZE];
};
+struct vxlan_rdst {
+ struct rcu_head rcu;
+ __be32 remote_ip;
+ __be16 remote_port;
+ u32 remote_vni;
+ u32 remote_ifindex;
+ struct vxlan_rdst *remote_next;
+};
+
/* Forwarding table entry */
struct vxlan_fdb {
struct hlist_node hlist; /* linked list of entries */
struct rcu_head rcu;
unsigned long updated; /* jiffies */
unsigned long used;
- __be32 remote_ip;
+ struct vxlan_rdst remote;
u16 state; /* see ndm_state */
u8 eth_addr[ETH_ALEN];
};
@@ -157,7 +166,8 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
const struct vxlan_fdb *fdb,
- u32 portid, u32 seq, int type, unsigned int flags)
+ u32 portid, u32 seq, int type, unsigned int flags,
+ const struct vxlan_rdst *rdst)
{
unsigned long now = jiffies;
struct nda_cacheinfo ci;
@@ -176,7 +186,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (type == RTM_GETNEIGH) {
ndm->ndm_family = AF_INET;
- send_ip = fdb->remote_ip != 0;
+ send_ip = rdst->remote_ip != htonl(INADDR_ANY);
send_eth = !is_zero_ether_addr(fdb->eth_addr);
} else
ndm->ndm_family = AF_BRIDGE;
@@ -188,7 +198,17 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
- if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip))
+ if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
+ goto nla_put_failure;
+
+ if (rdst->remote_port && rdst->remote_port != vxlan_port &&
+ nla_put_be16(skb, NDA_PORT, rdst->remote_port))
+ goto nla_put_failure;
+ if (rdst->remote_vni != vxlan->vni &&
+ nla_put_be32(skb, NDA_VNI, rdst->remote_vni))
+ goto nla_put_failure;
+ if (rdst->remote_ifindex &&
+ nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
@@ -211,6 +231,9 @@ static inline size_t vxlan_nlmsg_size(void)
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ nla_total_size(sizeof(__be32)) /* NDA_DST */
+ + nla_total_size(sizeof(__be32)) /* NDA_PORT */
+ + nla_total_size(sizeof(__be32)) /* NDA_VNI */
+ + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
+ nla_total_size(sizeof(struct nda_cacheinfo));
}
@@ -225,7 +248,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
if (skb == NULL)
goto errout;
- err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
+ err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, &fdb->remote);
if (err < 0) {
/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -247,7 +270,8 @@ static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
memset(&f, 0, sizeof f);
f.state = NUD_STALE;
- f.remote_ip = ipa; /* goes to NDA_DST */
+ f.remote.remote_ip = ipa; /* goes to NDA_DST */
+ f.remote.remote_vni = VXLAN_N_VID;
vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}
@@ -300,10 +324,38 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
return NULL;
}
+/* Add/update destinations for multicast */
+static int vxlan_fdb_append(struct vxlan_fdb *f,
+ __be32 ip, __u32 port, __u32 vni, __u32 ifindex)
+{
+ struct vxlan_rdst *rd_prev, *rd;
+
+ rd_prev = NULL;
+ for (rd = &f->remote; rd; rd = rd->remote_next) {
+ if (rd->remote_ip == ip &&
+ rd->remote_port == port &&
+ rd->remote_vni == vni &&
+ rd->remote_ifindex == ifindex)
+ return 0;
+ rd_prev = rd;
+ }
+ rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
+ if (rd == NULL)
+ return -ENOBUFS;
+ rd->remote_ip = ip;
+ rd->remote_port = port;
+ rd->remote_vni = vni;
+ rd->remote_ifindex = ifindex;
+ rd->remote_next = NULL;
+ rd_prev->remote_next = rd;
+ return 1;
+}
+
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, __be32 ip,
- __u16 state, __u16 flags)
+ __u16 state, __u16 flags,
+ __u32 port, __u32 vni, __u32 ifindex)
{
struct vxlan_fdb *f;
int notify = 0;
@@ -320,6 +372,14 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
f->updated = jiffies;
notify = 1;
}
+ if ((flags & NLM_F_APPEND) &&
+ is_multicast_ether_addr(f->eth_addr)) {
+ int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
+
+ if (rc < 0)
+ return rc;
+ notify |= rc;
+ }
} else {
if (!(flags & NLM_F_CREATE))
return -ENOENT;
@@ -333,7 +393,11 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return -ENOMEM;
notify = 1;
- f->remote_ip = ip;
+ f->remote.remote_ip = ip;
+ f->remote.remote_port = port;
+ f->remote.remote_vni = vni;
+ f->remote.remote_ifindex = ifindex;
+ f->remote.remote_next = NULL;
f->state = state;
f->updated = f->used = jiffies;
memcpy(f->eth_addr, mac, ETH_ALEN);
@@ -349,6 +413,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return 0;
}
+static void vxlan_fdb_free(struct rcu_head *head)
+{
+ struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
+
+ while (f->remote.remote_next) {
+ struct vxlan_rdst *rd = f->remote.remote_next;
+
+ f->remote.remote_next = rd->remote_next;
+ kfree(rd);
+ }
+ kfree(f);
+}
+
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
netdev_dbg(vxlan->dev,
@@ -358,7 +435,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
hlist_del_rcu(&f->hlist);
- kfree_rcu(f, rcu);
+ call_rcu(&f->rcu, vxlan_fdb_free);
}
/* Add static entry (via netlink) */
@@ -367,7 +444,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
const unsigned char *addr, u16 flags)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct net *net = dev_net(vxlan->dev);
__be32 ip;
+ u32 port, vni, ifindex;
int err;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@ -384,8 +463,36 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
ip = nla_get_be32(tb[NDA_DST]);
+ if (tb[NDA_PORT]) {
+ if (nla_len(tb[NDA_PORT]) != sizeof(u32))
+ return -EINVAL;
+ port = nla_get_u32(tb[NDA_PORT]);
+ } else
+ port = vxlan_port;
+
+ if (tb[NDA_VNI]) {
+ if (nla_len(tb[NDA_VNI]) != sizeof(u32))
+ return -EINVAL;
+ vni = nla_get_u32(tb[NDA_VNI]);
+ } else
+ vni = vxlan->vni;
+
+ if (tb[NDA_IFINDEX]) {
+ struct net_device *dev;
+
+ if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
+ return -EINVAL;
+ ifindex = nla_get_u32(tb[NDA_IFINDEX]);
+ dev = dev_get_by_index(net, ifindex);
+ if (!dev)
+ return -EADDRNOTAVAIL;
+ dev_put(dev);
+ } else
+ ifindex = 0;
+
spin_lock_bh(&vxlan->hash_lock);
- err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
+ err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags, port,
+ vni, ifindex);
spin_unlock_bh(&vxlan->hash_lock);
return err;
@@ -423,18 +530,21 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
int err;
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
- if (idx < cb->args[0])
- goto skip;
-
- err = vxlan_fdb_info(skb, vxlan, f,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI);
- if (err < 0)
- break;
+ struct vxlan_rdst *rd;
+ for (rd = &f->remote; rd; rd = rd->remote_next) {
+ if (idx < cb->args[0])
+ goto skip;
+
+ err = vxlan_fdb_info(skb, vxlan, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI, rd);
+ if (err < 0)
+ break;
skip:
- ++idx;
+ ++idx;
+ }
}
}
@@ -454,22 +564,23 @@ static void vxlan_snoop(struct net_device *dev,
f = vxlan_find_mac(vxlan, src_mac);
if (likely(f)) {
f->used = jiffies;
- if (likely(f->remote_ip == src_ip))
+ if (likely(f->remote.remote_ip == src_ip))
return;
if (net_ratelimit())
netdev_info(dev,
"%pM migrated from %pI4 to %pI4\n",
- src_mac, &f->remote_ip, &src_ip);
+ src_mac, &f->remote.remote_ip, &src_ip);
- f->remote_ip = src_ip;
+ f->remote.remote_ip = src_ip;
f->updated = jiffies;
} else {
/* learned new entry */
spin_lock(&vxlan->hash_lock);
err = vxlan_fdb_create(vxlan, src_mac, src_ip,
NUD_REACHABLE,
- NLM_F_EXCL|NLM_F_CREATE);
+ NLM_F_EXCL|NLM_F_CREATE,
+ vxlan_port, vxlan->vni, 0);
spin_unlock(&vxlan->hash_lock);
}
}
@@ -701,7 +812,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
}
f = vxlan_find_mac(vxlan, n->ha);
- if (f && f->remote_ip == 0) {
+ if (f && f->remote.remote_ip == htonl(INADDR_ANY)) {
/* bridge-local neighbor */
neigh_release(n);
goto out;
@@ -820,48 +931,40 @@ static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
return (((u64) hash * range) >> 32) + vxlan->port_min;
}
-/* Transmit local packets over Vxlan
- *
- * Outer IP header inherits ECN and DF from inner header.
- * Outer UDP destination is the VXLAN assigned port.
- * source port is based on hash of flow
- */
-static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+static int handle_offloads(struct sk_buff *skb)
+{
+ if (skb_is_gso(skb)) {
+ int err = skb_unclone(skb, GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+
+ skb_shinfo(skb)->gso_type |= (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP);
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL)
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 0;
+}
+
+static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ struct vxlan_rdst *rdst, bool did_rsc)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct rtable *rt;
const struct iphdr *old_iph;
- struct ethhdr *eth;
struct iphdr *iph;
struct vxlanhdr *vxh;
struct udphdr *uh;
struct flowi4 fl4;
unsigned int pkt_len = skb->len;
__be32 dst;
- __u16 src_port;
+ __u16 src_port, dst_port;
+ u32 vni;
__be16 df = 0;
__u8 tos, ttl;
- int err;
- bool did_rsc = false;
- const struct vxlan_fdb *f;
-
- skb_reset_mac_header(skb);
- eth = eth_hdr(skb);
- if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
- return arp_reduce(dev, skb);
- else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
- did_rsc = route_shortcircuit(dev, skb);
-
- f = vxlan_find_mac(vxlan, eth->h_dest);
- if (f == NULL) {
- did_rsc = false;
- dst = vxlan->gaddr;
- if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
- !is_multicast_ether_addr(eth->h_dest))
- vxlan_fdb_miss(vxlan, eth->h_dest);
- } else
- dst = f->remote_ip;
+ dst_port = rdst->remote_port ? rdst->remote_port : vxlan_port;
+ vni = rdst->remote_vni;
+ dst = rdst->remote_ip;
if (!dst) {
if (did_rsc) {
@@ -909,7 +1012,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
src_port = vxlan_src_port(vxlan, skb);
memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = vxlan->link;
+ fl4.flowi4_oif = rdst->remote_ifindex;
fl4.flowi4_tos = RT_TOS(tos);
fl4.daddr = dst;
fl4.saddr = vxlan->saddr;
@@ -936,13 +1039,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
- vxh->vx_vni = htonl(vxlan->vni << 8);
+ vxh->vx_vni = htonl(vni << 8);
__skb_push(skb, sizeof(*uh));
skb_reset_transport_header(skb);
uh = udp_hdr(skb);
- uh->dest = htons(vxlan_port);
+ uh->dest = htons(dst_port);
uh->source = htons(src_port);
uh->len = htons(skb->len);
@@ -965,22 +1068,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_set_owner(dev, skb);
- /* See iptunnel_xmit() */
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- skb->ip_summed = CHECKSUM_NONE;
-
- err = ip_local_out(skb);
- if (likely(net_xmit_eval(err) == 0)) {
- struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);
+ if (handle_offloads(skb))
+ goto drop;
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += pkt_len;
- u64_stats_update_end(&stats->syncp);
- } else {
- dev->stats.tx_errors++;
- dev->stats.tx_aborted_errors++;
- }
+ iptunnel_xmit(skb, dev);
return NETDEV_TX_OK;
drop:
@@ -994,6 +1085,66 @@ tx_free:
return NETDEV_TX_OK;
}
+/* Transmit local packets over VXLAN
+ *
+ * Outer IP header inherits ECN and DF from inner header.
+ * Outer UDP destination is the VXLAN assigned port.
+ * Source port is based on a hash of the flow.
+ */
+static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct ethhdr *eth;
+ bool did_rsc = false;
+ struct vxlan_rdst group, *rdst0, *rdst;
+ struct vxlan_fdb *f;
+ int rc1, rc;
+
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+
+ if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
+ return arp_reduce(dev, skb);
+ else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
+ did_rsc = route_shortcircuit(dev, skb);
+
+ f = vxlan_find_mac(vxlan, eth->h_dest);
+ if (f == NULL) {
+ did_rsc = false;
+ group.remote_port = vxlan_port;
+ group.remote_vni = vxlan->vni;
+ group.remote_ip = vxlan->gaddr;
+ group.remote_ifindex = vxlan->link;
+ group.remote_next = NULL;
+ rdst0 = &group;
+
+ if (group.remote_ip == htonl(INADDR_ANY) &&
+ (vxlan->flags & VXLAN_F_L2MISS) &&
+ !is_multicast_ether_addr(eth->h_dest))
+ vxlan_fdb_miss(vxlan, eth->h_dest);
+ } else
+ rdst0 = &f->remote;
+
+ rc = NETDEV_TX_OK;
+
+ /* if there are multiple destinations, send copies */
+ for (rdst = rdst0->remote_next; rdst; rdst = rdst->remote_next) {
+ struct sk_buff *skb1;
+
+ skb1 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb1)
+ continue;
+ rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+ if (rc == NETDEV_TX_OK)
+ rc = rc1;
+ }
+
+ rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
+ if (rc == NETDEV_TX_OK)
+ rc = rc1;
+ return rc;
+}
+
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
@@ -1189,8 +1338,10 @@ static void vxlan_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -1555,6 +1706,7 @@ static void __exit vxlan_cleanup_module(void)
{
rtnl_link_unregister(&vxlan_link_ops);
unregister_pernet_device(&vxlan_net_ops);
+ rcu_barrier();
}
module_exit(vxlan_cleanup_module);
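The rcu_barrier() pairs with the call_rcu(&f->rcu, vxlan_fdb_free) introduced above: the module must not be unloaded while callbacks into its own text are still queued. A minimal standalone illustration of the pattern (not vxlan code):

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
        struct rcu_head rcu;
};

static void obj_free(struct rcu_head *head)
{
        kfree(container_of(head, struct obj, rcu));
}

static int __init demo_init(void)
{
        struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (!o)
                return -ENOMEM;
        call_rcu(&o->rcu, obj_free); /* obj_free() lives in this module */
        return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
        rcu_barrier(); /* wait for all queued obj_free() callbacks */
}
module_exit(demo_exit);
MODULE_LICENSE("GPL");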
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d1315b44237..55dd95f9824 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -83,8 +83,6 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
*/
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
if (!vring->va) {
- wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
- vring->size);
kfree(vring->ctx);
vring->ctx = NULL;
return -ENOMEM;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 38bc5a7997f..f73cbb512f7 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -419,8 +419,6 @@ static inline
static int alloc_ringmemory(struct b43_dmaring *ring)
{
- gfp_t flags = GFP_KERNEL;
-
/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
* alignment and 8K buffers for 64-bit DMA with 8K alignment.
* In practice we could use smaller buffers for the latter, but the
@@ -435,12 +433,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
ring_mem_size, &(ring->dmabase),
- flags);
- if (!ring->descbase) {
- b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ring->descbase)
return -ENOMEM;
- }
- memset(ring->descbase, 0, ring_mem_size);
return 0;
}
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 2d3c6644f82..faeafe219c5 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -334,13 +334,9 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
B43legacy_DMA_RINGMEMSIZE,
&(ring->dmabase),
- GFP_KERNEL);
- if (!ring->descbase) {
- b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
- " failed\n");
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ring->descbase)
return -ENOMEM;
- }
- memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);
return 0;
}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index e0b9d7fa5de..dc1e6da9976 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -2379,10 +2379,8 @@ il3945_hw_set_hw_params(struct il_priv *il)
il->_3945.shared_virt =
dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
&il->_3945.shared_phys, GFP_KERNEL);
- if (!il->_3945.shared_virt) {
- IL_ERR("failed to allocate pci memory\n");
+ if (!il->_3945.shared_virt)
return -ENOMEM;
- }
il->hw_params.bcast_id = IL3945_BROADCAST_ID;
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 7941eb3a016..238f52874f1 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -1921,8 +1921,8 @@ drop_unlock:
static inline int
il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
{
- ptr->addr =
- dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
+ ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
+ GFP_KERNEL);
if (!ptr->addr)
return -ENOMEM;
ptr->size = size;
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index e006ea83132..db218712403 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -2566,15 +2566,13 @@ il_rx_queue_alloc(struct il_priv *il)
INIT_LIST_HEAD(&rxq->rx_used);
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd =
- dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
- GFP_KERNEL);
+ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+ GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
- rxq->rb_stts =
- dma_alloc_coherent(dev, sizeof(struct il_rb_status),
- &rxq->rb_stts_dma, GFP_KERNEL);
+ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err_rb;
@@ -2941,10 +2939,9 @@ il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
* shared with device */
txq->tfds =
dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
- if (!txq->tfds) {
- IL_ERR("Fail to alloc TFDs\n");
+ if (!txq->tfds)
goto error;
- }
+
txq->q.id = id;
return 0;
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 96f2025d936..458e699c63c 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -2235,9 +2235,8 @@ il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
return -EINVAL;
}
- desc->v_addr =
- dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
- GFP_KERNEL);
+ desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
+ &desc->p_addr, GFP_KERNEL);
return (desc->v_addr != NULL) ? 0 : -ENOMEM;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8595c16f74d..7a508d835f5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -501,10 +501,8 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
* shared with device */
txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
&txq->q.dma_addr, GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+ if (!txq->tfds)
goto error;
- }
BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index a0c8caef3b0..696abed3e74 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -124,12 +124,10 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
*/
addr = dma_alloc_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
- &dma, GFP_KERNEL);
+ &dma, GFP_KERNEL | __GFP_ZERO);
if (!addr)
return -ENOMEM;
- memset(addr, 0, queue->limit * queue->desc_size);
-
/*
* Initialize all queue entries to contain valid addresses.
*/
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
index b581966c88f..913b9a92fb0 100644
--- a/drivers/scsi/csiostor/Makefile
+++ b/drivers/scsi/csiostor/Makefile
@@ -8,4 +8,5 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
- csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o
+ csio_hw.o csio_hw_t4.o csio_hw_t5.o csio_isr.o \
+ csio_mb.o csio_rnode.o csio_wr.o
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index bdd78fb4fc7..a0b4c8991de 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -61,7 +61,7 @@ int csio_msi = 2;
static int dev_num;
/* FCoE Adapter types & its description */
-static const struct csio_adap_desc csio_fcoe_adapters[] = {
+static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
@@ -77,7 +77,38 @@ static const struct csio_adap_desc csio_fcoe_adapters[] = {
{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
- {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"}
+ {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
+ {"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
+ {"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
+ {"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
+ {"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
+ {"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
+ {"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
+ {"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
+ {"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
+};
+
+static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
+ {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
+ {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
+ {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"},
+ {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
+ {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
+ {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
+ {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
+ {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
+ {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
+ {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
+ {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
+ {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
+ {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
+ {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
+ {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
+ {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
+ {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
+ {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
+ {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
+ {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
};
static void csio_mgmtm_cleanup(struct csio_mgmtm *);
@@ -124,7 +155,7 @@ int csio_is_hw_removing(struct csio_hw *hw)
* at the time it indicated completion is stored there. Returns 0 if the
* operation completes and -EAGAIN otherwise.
*/
-static int
+int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
int polarity, int attempts, int delay, uint32_t *valp)
{
@@ -145,6 +176,24 @@ csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
}
}
+/*
+ * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ * @hw: the adapter
+ * @addr: the indirect TP register address
+ * @mask: specifies the field within the register to modify
+ * @val: new value for the field
+ *
+ * Sets a field of an indirect TP register to the given value.
+ */
+void
+csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
+ unsigned int mask, unsigned int val)
+{
+ csio_wr_reg32(hw, addr, TP_PIO_ADDR);
+ val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
+ csio_wr_reg32(hw, val, TP_PIO_DATA);
+}
+
void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
uint32_t value)
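[Editor's note on the hunk above] csio_hw_tp_wr_bits_indirect() is a read-modify-write through an indirect register pair: the target address is written to TP_PIO_ADDR, the current value is read back through TP_PIO_DATA, the masked field is replaced, and the merged value is written out. A hypothetical caller; the register address and field mask below are purely illustrative:

    /* Sketch: update a 4-bit field at bits 7:4, preserving the rest. */
    #define EXAMPLE_TP_ADDR  0x7d04         /* illustrative, not a real reg */
    #define EXAMPLE_TP_FIELD (0xfU << 4)    /* illustrative field mask */

    static void example_set_field(struct csio_hw *hw, unsigned int v)
    {
            csio_hw_tp_wr_bits_indirect(hw, EXAMPLE_TP_ADDR,
                                        EXAMPLE_TP_FIELD, (v & 0xfU) << 4);
    }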
@@ -157,242 +206,22 @@ csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
}
-/*
- * csio_hw_mc_read - read from MC through backdoor accesses
- * @hw: the hw module
- * @addr: address of first byte requested
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from MC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-int
-csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, __be32 *data,
- uint64_t *ecc)
-{
- int i;
-
- if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
- return -EBUSY;
- csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
- csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
- csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
- csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
- MC_BIST_CMD);
- i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
- 0, 10, 1, NULL);
- if (i)
- return i;
-
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
- if (ecc)
- *ecc = csio_rd_reg64(hw, MC_DATA(16));
-#undef MC_DATA
- return 0;
-}
-
-/*
- * csio_hw_edc_read - read from EDC through backdoor accesses
- * @hw: the hw module
- * @idx: which EDC to access
- * @addr: address of first byte requested
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-int
-csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
- uint64_t *ecc)
-{
- int i;
-
- idx *= EDC_STRIDE;
- if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
- return -EBUSY;
- csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
- csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
- csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
- csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
- EDC_BIST_CMD + idx);
- i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
- 0, 10, 1, NULL);
- if (i)
- return i;
-
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
- if (ecc)
- *ecc = csio_rd_reg64(hw, EDC_DATA(16));
-#undef EDC_DATA
- return 0;
-}
-
-/*
- * csio_mem_win_rw - read/write memory through PCIE memory window
- * @hw: the adapter
- * @addr: address of first byte requested
- * @data: MEMWIN0_APERTURE bytes of data containing the requested address
- * @dir: direction of transfer 1 => read, 0 => write
- *
- * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
- * MEMWIN0_APERTURE-byte-aligned address that covers the requested
- * address @addr.
- */
-static int
-csio_mem_win_rw(struct csio_hw *hw, u32 addr, u32 *data, int dir)
-{
- int i;
-
- /*
- * Setup offset into PCIE memory window. Address must be a
- * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
- * ensure that changes propagate before we attempt to use the new
- * values.)
- */
- csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1),
- PCIE_MEM_ACCESS_OFFSET);
- csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET);
-
- /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
- for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) {
- if (dir)
- *data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i));
- else
- csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i));
- }
-
- return 0;
-}
-
-/*
- * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
- * @hw: the csio_hw
- * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
- * @addr: address within indicated memory type
- * @len: amount of memory to transfer
- * @buf: host memory buffer
- * @dir: direction of transfer 1 => read, 0 => write
- *
- * Reads/writes an [almost] arbitrary memory region in the firmware: the
- * firmware memory address, length and host buffer must be aligned on
- * 32-bit boudaries. The memory is transferred as a raw byte sequence
- * from/to the firmware's memory. If this memory contains data
- * structures which contain multi-byte integers, it's the callers
- * responsibility to perform appropriate byte order conversions.
- */
-static int
-csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len,
- uint32_t *buf, int dir)
-{
- uint32_t pos, start, end, offset, memoffset;
- int ret;
- uint32_t *data;
-
- /*
- * Argument sanity checks ...
- */
- if ((addr & 0x3) || (len & 0x3))
- return -EINVAL;
-
- data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* Offset into the region of memory which is being accessed
- * MEM_EDC0 = 0
- * MEM_EDC1 = 1
- * MEM_MC = 2
- */
- memoffset = (mtype * (5 * 1024 * 1024));
-
- /* Determine the PCIE_MEM_ACCESS_OFFSET */
- addr = addr + memoffset;
-
- /*
- * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
- * at a time so we need to round down the start and round up the end.
- * We'll start copying out of the first line at (addr - start) a word
- * at a time.
- */
- start = addr & ~(MEMWIN0_APERTURE-1);
- end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
- offset = (addr - start)/sizeof(__be32);
-
- for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
- /*
- * If we're writing, copy the data from the caller's memory
- * buffer
- */
- if (!dir) {
- /*
- * If we're doing a partial write, then we need to do
- * a read-modify-write ...
- */
- if (offset || len < MEMWIN0_APERTURE) {
- ret = csio_mem_win_rw(hw, pos, data, 1);
- if (ret) {
- kfree(data);
- return ret;
- }
- }
- while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
- len > 0) {
- data[offset++] = *buf++;
- len -= sizeof(__be32);
- }
- }
-
- /*
- * Transfer a block of memory and bail if there's an error.
- */
- ret = csio_mem_win_rw(hw, pos, data, dir);
- if (ret) {
- kfree(data);
- return ret;
- }
-
- /*
- * If we're reading, copy the data into the caller's memory
- * buffer.
- */
- if (dir)
- while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
- len > 0) {
- *buf++ = data[offset++];
- len -= sizeof(__be32);
- }
- }
-
- kfree(data);
-
- return 0;
-}
-
static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
- return csio_memory_rw(hw, mtype, addr, len, buf, 0);
+ return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
+ addr, len, buf, 0);
}
/*
* EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
*/
-#define EEPROM_MAX_RD_POLL 40
-#define EEPROM_MAX_WR_POLL 6
-#define EEPROM_STAT_ADDR 0x7bfc
-#define VPD_BASE 0x400
-#define VPD_BASE_OLD 0
-#define VPD_LEN 512
+#define EEPROM_MAX_RD_POLL 40
+#define EEPROM_MAX_WR_POLL 6
+#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_BASE 0x400
+#define VPD_BASE_OLD 0
+#define VPD_LEN 1024
#define VPD_INFO_FLD_HDR_SIZE 3
/*
@@ -817,23 +646,6 @@ out:
return 0;
}
-/*
- * csio_hw_flash_cfg_addr - return the address of the flash
- * configuration file
- * @hw: the HW module
- *
- * Return the address within the flash where the Firmware Configuration
- * File is stored.
- */
-static unsigned int
-csio_hw_flash_cfg_addr(struct csio_hw *hw)
-{
- if (hw->params.sf_size == 0x100000)
- return FPGA_FLASH_CFG_OFFSET;
- else
- return FLASH_CFG_OFFSET;
-}
-
static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
@@ -898,13 +710,13 @@ csio_hw_check_fw_version(struct csio_hw *hw)
minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
- if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */
csio_err(hw, "card FW has major version %u, driver wants %u\n",
- major, FW_VERSION_MAJOR);
+ major, FW_VERSION_MAJOR(hw));
return -EINVAL;
}
- if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
return 0; /* perfect match */
/* Minor/micro version mismatch */
@@ -1044,7 +856,7 @@ static void
csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
{
uint16_t val;
- uint32_t pcie_cap;
+ int pcie_cap;
if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
pci_read_config_word(hw->pdev,
@@ -1056,84 +868,6 @@ csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
}
}
-
-/*
- * Return the specified PCI-E Configuration Space register from our Physical
- * Function. We try first via a Firmware LDST Command since we prefer to let
- * the firmware own all of these registers, but if that fails we go for it
- * directly ourselves.
- */
-static uint32_t
-csio_read_pcie_cfg4(struct csio_hw *hw, int reg)
-{
- u32 val = 0;
- struct csio_mb *mbp;
- int rv;
- struct fw_ldst_cmd *ldst_cmd;
-
- mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
- if (!mbp) {
- CSIO_INC_STATS(hw, n_err_nomem);
- pci_read_config_dword(hw->pdev, reg, &val);
- return val;
- }
-
- csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
-
- rv = csio_mb_issue(hw, mbp);
-
- /*
- * If the LDST Command suucceeded, exctract the returned register
- * value. Otherwise read it directly ourself.
- */
- if (rv == 0) {
- ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
- val = ntohl(ldst_cmd->u.pcie.data[0]);
- } else
- pci_read_config_dword(hw->pdev, reg, &val);
-
- mempool_free(mbp, hw->mb_mempool);
-
- return val;
-} /* csio_read_pcie_cfg4 */
-
-static int
-csio_hw_set_mem_win(struct csio_hw *hw)
-{
- u32 bar0;
-
- /*
- * Truncation intentional: we only read the bottom 32-bits of the
- * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
- * read BAR0 instead of using pci_resource_start() because we could be
- * operating from within a Virtual Machine which is trapping our
- * accesses to our Configuration Space and we need to set up the PCI-E
- * Memory Window decoders with the actual addresses which will be
- * coming across the PCI-E link.
- */
- bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
- bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
-
- /*
- * Set up memory window for accessing adapter memory ranges. (Read
- * back MA register to ensure that changes propagate before we attempt
- * to use the new values.)
- */
- csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) |
- WINDOW(ilog2(MEMWIN0_APERTURE) - 10),
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0));
- csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) |
- WINDOW(ilog2(MEMWIN1_APERTURE) - 10),
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1));
- csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) |
- WINDOW(ilog2(MEMWIN2_APERTURE) - 10),
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
- csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
- return 0;
-} /* csio_hw_set_mem_win */
-
-
-
/*****************************************************************************/
/* HW State machine assists */
/*****************************************************************************/
@@ -1234,7 +968,9 @@ retry:
for (;;) {
uint32_t pcie_fw;
+ spin_unlock_irq(&hw->lock);
msleep(50);
+ spin_lock_irq(&hw->lock);
waiting -= 50;
/*
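[Editor's note on the hunk above] This two-line change fixes a sleep-while-atomic bug: msleep() may schedule, which is forbidden while holding a spinlock, so the lock is released around the sleep and reacquired before shared state is touched again. The shape of the fix, sketched; hw->lock and the 50 ms cadence come from the code above, the rest is illustrative:

    spin_lock_irq(&hw->lock);
    while (waiting > 0) {
            spin_unlock_irq(&hw->lock);     /* never sleep under a spinlock */
            msleep(50);
            spin_lock_irq(&hw->lock);       /* retake before shared state */
            waiting -= 50;
            /* ... re-check the device's firmware state under the lock ... */
    }
    spin_unlock_irq(&hw->lock);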
@@ -2121,9 +1857,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
uint32_t *cfg_data;
int value_to_add = 0;
- if (request_firmware(&cf, CSIO_CF_FNAME, dev) < 0) {
- csio_err(hw, "could not find config file " CSIO_CF_FNAME
- ",err: %d\n", ret);
+ if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
+ csio_err(hw, "could not find config file %s, err: %d\n",
+ CSIO_CF_FNAME(hw), ret);
return -ENOENT;
}
@@ -2147,9 +1883,24 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
ret = csio_memory_write(hw, mtype, maddr,
cf->size + value_to_add, cfg_data);
+
+ if ((ret == 0) && (value_to_add != 0)) {
+ union {
+ u32 word;
+ char buf[4];
+ } last;
+ size_t size = cf->size & ~0x3;
+ int i;
+
+ last.word = cfg_data[size >> 2];
+ for (i = value_to_add; i < 4; i++)
+ last.buf[i] = 0;
+ ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
+ }
if (ret == 0) {
- csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n");
- strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
+ csio_info(hw, "config file upgraded to %s\n",
+ CSIO_CF_FNAME(hw));
+ snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
}
leave:
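[Editor's note on the hunk above] The added block finishes a config-file write whose length is not a multiple of 4. Worked example: if cf->size is 0x1ca6, then value_to_add is 2; the code rounds the size down to 0x1ca4, pulls the word holding the two tail bytes into the union, zeroes last.buf[2] and last.buf[3], and rewrites that single word so the padding bytes are guaranteed to be zero. A standalone sketch of just the padding step, mirroring the driver's loop (cf_size and cfg_data stand in for the driver's variables):

    /* Sketch: zero the pad bytes of the final partial 32-bit word. */
    if (cf_size & 0x3) {
            int pad = 4 - (cf_size & 0x3);  /* == value_to_add above */
            union { u32 word; char buf[4]; } last;
            size_t size = cf_size & ~0x3;   /* offset of the tail word */
            int i;

            last.word = cfg_data[size >> 2];
            for (i = pad; i < 4; i++)
                    last.buf[i] = 0;
            /* ... write last.word back at offset 'size' ... */
    }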
@@ -2179,7 +1930,7 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
unsigned int mtype, maddr;
int rv;
- uint32_t finiver, finicsum, cfcsum;
+ uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
int using_flash;
char path[64];
@@ -2207,7 +1958,7 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
* config file from flash.
*/
mtype = FW_MEMTYPE_CF_FLASH;
- maddr = csio_hw_flash_cfg_addr(hw);
+ maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
using_flash = 1;
} else {
/*
@@ -2346,30 +2097,32 @@ csio_hw_flash_fw(struct csio_hw *hw)
struct pci_dev *pci_dev = hw->pdev;
struct device *dev = &pci_dev->dev ;
- if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) {
- csio_err(hw, "could not find firmware image " CSIO_FW_FNAME
- ",err: %d\n", ret);
+ if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
+ csio_err(hw, "could not find firmware image %s, err: %d\n",
+ CSIO_FW_FNAME(hw), ret);
return -EINVAL;
}
hdr = (const struct fw_hdr *)fw->data;
fw_ver = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR)
+ if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw))
return -EINVAL; /* wrong major version, won't do */
/*
* If the flash FW is unusable or we found something newer, load it.
*/
- if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR ||
+ if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
fw_ver > hw->fwrev) {
ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
/*force=*/false);
if (!ret)
- csio_info(hw, "firmware upgraded to version %pI4 from "
- CSIO_FW_FNAME "\n", &hdr->fw_ver);
+ csio_info(hw,
+ "firmware upgraded to version %pI4 from %s\n",
+ &hdr->fw_ver, CSIO_FW_FNAME(hw));
else
csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
- }
+ } else
+ ret = -EINVAL;
release_firmware(fw);
@@ -2410,7 +2163,7 @@ csio_hw_configure(struct csio_hw *hw)
/* Set pci completion timeout value to 4 seconds. */
csio_set_pcie_completion_timeout(hw, 0xd);
- csio_hw_set_mem_win(hw);
+ hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
rv = csio_hw_get_fw_version(hw, &hw->fwrev);
if (rv != 0)
@@ -2478,6 +2231,8 @@ csio_hw_configure(struct csio_hw *hw)
} else {
if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+ hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
+
/* device parameters */
rv = csio_get_device_params(hw);
if (rv != 0)
@@ -2651,7 +2406,7 @@ csio_hw_intr_disable(struct csio_hw *hw)
}
-static void
+void
csio_hw_fatal_err(struct csio_hw *hw)
{
csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
@@ -2990,14 +2745,6 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
/* END: HW SM */
/*****************************************************************************/
-/* Slow path handlers */
-struct intr_info {
- unsigned int mask; /* bits to check in interrupt status */
- const char *msg; /* message to print or NULL */
- short stat_idx; /* stat counter to increment or -1 */
- unsigned short fatal; /* whether the condition reported is fatal */
-};
-
/*
* csio_handle_intr_status - table driven interrupt handler
* @hw: HW instance
@@ -3011,7 +2758,7 @@ struct intr_info {
* by an entry specifying mask 0. Returns the number of fatal interrupt
* conditions.
*/
-static int
+int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
const struct intr_info *acts)
{
@@ -3038,80 +2785,6 @@ csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
}
/*
- * Interrupt handler for the PCIE module.
- */
-static void
-csio_pcie_intr_handler(struct csio_hw *hw)
-{
- static struct intr_info sysbus_intr_info[] = {
- { RNPP, "RXNP array parity error", -1, 1 },
- { RPCP, "RXPC array parity error", -1, 1 },
- { RCIP, "RXCIF array parity error", -1, 1 },
- { RCCP, "Rx completions control array parity error", -1, 1 },
- { RFTP, "RXFT array parity error", -1, 1 },
- { 0, NULL, 0, 0 }
- };
- static struct intr_info pcie_port_intr_info[] = {
- { TPCP, "TXPC array parity error", -1, 1 },
- { TNPP, "TXNP array parity error", -1, 1 },
- { TFTP, "TXFT array parity error", -1, 1 },
- { TCAP, "TXCA array parity error", -1, 1 },
- { TCIP, "TXCIF array parity error", -1, 1 },
- { RCAP, "RXCA array parity error", -1, 1 },
- { OTDD, "outbound request TLP discarded", -1, 1 },
- { RDPE, "Rx data parity error", -1, 1 },
- { TDUE, "Tx uncorrectable data error", -1, 1 },
- { 0, NULL, 0, 0 }
- };
- static struct intr_info pcie_intr_info[] = {
- { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
- { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
- { MSIDATAPERR, "MSI data parity error", -1, 1 },
- { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
- { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
- { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
- { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
- { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
- { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- { FIDPERR, "PCI FID parity error", -1, 1 },
- { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
- { MATAGPERR, "PCI MA tag parity error", -1, 1 },
- { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
- { RXWRPERR, "PCI Rx write parity error", -1, 1 },
- { RPLPERR, "PCI replay buffer parity error", -1, 1 },
- { PCIESINT, "PCI core secondary fault", -1, 1 },
- { PCIEPINT, "PCI core primary fault", -1, 1 },
- { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
- 0 },
- { 0, NULL, 0, 0 }
- };
-
- int fat;
-
- fat = csio_handle_intr_status(hw,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
- sysbus_intr_info) +
- csio_handle_intr_status(hw,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
- pcie_port_intr_info) +
- csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
- if (fat)
- csio_hw_fatal_err(hw);
-}
-
-/*
* TP interrupt handler.
*/
static void csio_tp_intr_handler(struct csio_hw *hw)
@@ -3517,7 +3190,7 @@ static void csio_ncsi_intr_handler(struct csio_hw *hw)
*/
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
- uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
if (!v)
@@ -3527,7 +3200,7 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
if (v & RXFIFO_PRTY_ERR)
csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
- csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
csio_hw_fatal_err(hw);
}
@@ -3596,7 +3269,7 @@ csio_hw_slow_intr_handler(struct csio_hw *hw)
csio_xgmac_intr_handler(hw, 3);
if (cause & PCIE)
- csio_pcie_intr_handler(hw);
+ hw->chip_ops->chip_pcie_intr_handler(hw);
if (cause & MC)
csio_mem_intr_handler(hw, MEM_MC);
@@ -4262,6 +3935,7 @@ csio_hw_get_device_id(struct csio_hw *hw)
&hw->params.pci.device_id);
csio_dev_id_cached(hw);
+ hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
} /* csio_hw_get_device_id */
@@ -4280,19 +3954,21 @@ csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
- if (prot_type == CSIO_FPGA) {
+ if (prot_type == CSIO_T4_FCOE_ASIC) {
+ memcpy(hw->hw_ver,
+ csio_t4_fcoe_adapters[adap_type].model_no, 16);
memcpy(hw->model_desc,
- csio_fcoe_adapters[13].description, 32);
- } else if (prot_type == CSIO_T4_FCOE_ASIC) {
+ csio_t4_fcoe_adapters[adap_type].description,
+ 32);
+ } else if (prot_type == CSIO_T5_FCOE_ASIC) {
memcpy(hw->hw_ver,
- csio_fcoe_adapters[adap_type].model_no, 16);
+ csio_t5_fcoe_adapters[adap_type].model_no, 16);
memcpy(hw->model_desc,
- csio_fcoe_adapters[adap_type].description, 32);
+ csio_t5_fcoe_adapters[adap_type].description,
+ 32);
} else {
char tempName[32] = "Chelsio FCoE Controller";
memcpy(hw->model_desc, tempName, 32);
-
- CSIO_DB_ASSERT(0);
}
}
} /* csio_hw_set_description */
@@ -4321,6 +3997,9 @@ csio_hw_init(struct csio_hw *hw)
strcpy(hw->name, CSIO_HW_NAME);
+ /* Initialize the HW chip ops with T4/T5 specific ops */
+ hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;
+
/* Set the model & its description */
ven_id = hw->params.pci.vendor_id;
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 9edcca4c71a..489fc095cb0 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -48,6 +48,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
+#include "csio_hw_chip.h"
#include "csio_wr.h"
#include "csio_mb.h"
#include "csio_scsi.h"
@@ -60,13 +61,6 @@
*/
#define FW_HOSTERROR 255
-#define CSIO_FW_FNAME "cxgb4/t4fw.bin"
-#define CSIO_CF_FNAME "cxgb4/t4-config.txt"
-
-#define FW_VERSION_MAJOR 1
-#define FW_VERSION_MINOR 2
-#define FW_VERSION_MICRO 8
-
#define CSIO_HW_NAME "Chelsio FCoE Adapter"
#define CSIO_MAX_PFN 8
#define CSIO_MAX_PPORTS 4
@@ -123,8 +117,6 @@ extern int csio_msi;
#define CSIO_VENDOR_ID 0x1425
#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
-#define CSIO_FPGA 0xA000
-#define CSIO_T4_FCOE_ASIC 0x4600
#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
EDC1 | LE | TP | MA | PM_TX | PM_RX | \
@@ -207,17 +199,6 @@ enum {
SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
};
-enum { MEM_EDC0, MEM_EDC1, MEM_MC };
-
-enum {
- MEMWIN0_APERTURE = 2048,
- MEMWIN0_BASE = 0x1b800,
- MEMWIN1_APERTURE = 32768,
- MEMWIN1_BASE = 0x28000,
- MEMWIN2_APERTURE = 65536,
- MEMWIN2_BASE = 0x30000,
-};
-
/* serial flash and firmware constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -239,9 +220,6 @@ enum {
FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/
FLASH_CFG_OFFSET = 0x1f0000,
FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
- FPGA_FLASH_CFG_OFFSET = 0xf0000 , /* if FPGA mode, then cfg file is
- * at 1MB - 64KB */
- FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
};
/*
@@ -259,6 +237,8 @@ enum {
FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+ /* Location of Firmware Configuration File in FLASH. */
+ FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
};
#undef FLASH_START
@@ -310,7 +290,7 @@ struct csio_adap_desc {
struct pci_params {
uint16_t vendor_id;
uint16_t device_id;
- uint32_t vpd_cap_addr;
+ int vpd_cap_addr;
uint16_t speed;
uint8_t width;
};
@@ -513,6 +493,7 @@ struct csio_hw {
uint32_t fwrev;
uint32_t tp_vers;
char chip_ver;
+ uint16_t chip_id; /* Tells T4/T5 chip */
uint32_t cfg_finiver;
uint32_t cfg_finicsum;
uint32_t cfg_cfcsum;
@@ -556,6 +537,9 @@ struct csio_hw {
*/
struct csio_fcoe_res_info fres_info; /* Fcoe resource info */
+ struct csio_hw_chip_ops *chip_ops; /* T4/T5 Chip specific
+ * Operations
+ */
/* MSIX vectors */
struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];
@@ -636,9 +620,16 @@ csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)
#define csio_dbg(__hw, __fmt, ...)
#endif
+int csio_hw_wait_op_done_val(struct csio_hw *, int, uint32_t, int,
+ int, int, uint32_t *);
+void csio_hw_tp_wr_bits_indirect(struct csio_hw *, unsigned int,
+ unsigned int, unsigned int);
int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);
void csio_hw_intr_disable(struct csio_hw *);
-int csio_hw_slow_intr_handler(struct csio_hw *hw);
+int csio_hw_slow_intr_handler(struct csio_hw *);
+int csio_handle_intr_status(struct csio_hw *, unsigned int,
+ const struct intr_info *);
+
int csio_hw_start(struct csio_hw *);
int csio_hw_stop(struct csio_hw *);
int csio_hw_reset(struct csio_hw *);
@@ -647,19 +638,17 @@ int csio_is_hw_removing(struct csio_hw *);
int csio_fwevtq_handler(struct csio_hw *);
void csio_evtq_worker(struct work_struct *);
-int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type,
- void *evt_msg, uint16_t len);
+int csio_enqueue_evt(struct csio_hw *, enum csio_evt, void *, uint16_t);
void csio_evtq_flush(struct csio_hw *hw);
int csio_request_irqs(struct csio_hw *);
void csio_intr_enable(struct csio_hw *);
void csio_intr_disable(struct csio_hw *, bool);
+void csio_hw_fatal_err(struct csio_hw *);
struct csio_lnode *csio_lnode_alloc(struct csio_hw *);
int csio_config_queues(struct csio_hw *);
-int csio_hw_mc_read(struct csio_hw *, uint32_t, __be32 *, uint64_t *);
-int csio_hw_edc_read(struct csio_hw *, int, uint32_t, __be32 *, uint64_t *);
int csio_hw_init(struct csio_hw *);
void csio_hw_exit(struct csio_hw *);
#endif /* ifndef __CSIO_HW_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
new file mode 100644
index 00000000000..bca0de61ae8
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -0,0 +1,175 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_HW_CHIP_H__
+#define __CSIO_HW_CHIP_H__
+
+#include "csio_defs.h"
+
+/* FCoE device IDs for T4 */
+#define CSIO_DEVID_T440DBG_FCOE 0x4600
+#define CSIO_DEVID_T420CR_FCOE 0x4601
+#define CSIO_DEVID_T422CR_FCOE 0x4602
+#define CSIO_DEVID_T440CR_FCOE 0x4603
+#define CSIO_DEVID_T420BCH_FCOE 0x4604
+#define CSIO_DEVID_T440BCH_FCOE 0x4605
+#define CSIO_DEVID_T440CH_FCOE 0x4606
+#define CSIO_DEVID_T420SO_FCOE 0x4607
+#define CSIO_DEVID_T420CX_FCOE 0x4608
+#define CSIO_DEVID_T420BT_FCOE 0x4609
+#define CSIO_DEVID_T404BT_FCOE 0x460A
+#define CSIO_DEVID_B420_FCOE 0x460B
+#define CSIO_DEVID_B404_FCOE 0x460C
+#define CSIO_DEVID_T480CR_FCOE 0x460D
+#define CSIO_DEVID_T440LPCR_FCOE 0x460E
+#define CSIO_DEVID_AMSTERDAM_T4_FCOE 0x460F
+#define CSIO_DEVID_HUAWEI_T480_FCOE 0x4680
+#define CSIO_DEVID_HUAWEI_T440_FCOE 0x4681
+#define CSIO_DEVID_HUAWEI_STG310_FCOE 0x4682
+#define CSIO_DEVID_ACROMAG_XMC_XAUI 0x4683
+#define CSIO_DEVID_ACROMAG_XMC_SFP_FCOE 0x4684
+#define CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE 0x4685
+#define CSIO_DEVID_HUAWEI_10GT_FCOE 0x4686
+#define CSIO_DEVID_HUAWEI_T440_TOE_FCOE 0x4687
+
+/* FCoE device IDs for T5 */
+#define CSIO_DEVID_T580DBG_FCOE 0x5600
+#define CSIO_DEVID_T520CR_FCOE 0x5601
+#define CSIO_DEVID_T522CR_FCOE 0x5602
+#define CSIO_DEVID_T540CR_FCOE 0x5603
+#define CSIO_DEVID_T520BCH_FCOE 0x5604
+#define CSIO_DEVID_T540BCH_FCOE 0x5605
+#define CSIO_DEVID_T540CH_FCOE 0x5606
+#define CSIO_DEVID_T520SO_FCOE 0x5607
+#define CSIO_DEVID_T520CX_FCOE 0x5608
+#define CSIO_DEVID_T520BT_FCOE 0x5609
+#define CSIO_DEVID_T504BT_FCOE 0x560A
+#define CSIO_DEVID_B520_FCOE 0x560B
+#define CSIO_DEVID_B504_FCOE 0x560C
+#define CSIO_DEVID_T580CR2_FCOE 0x560D
+#define CSIO_DEVID_T540LPCR_FCOE 0x560E
+#define CSIO_DEVID_AMSTERDAM_T5_FCOE 0x560F
+#define CSIO_DEVID_T580LPCR_FCOE 0x5610
+#define CSIO_DEVID_T520LLCR_FCOE 0x5611
+#define CSIO_DEVID_T560CR_FCOE 0x5612
+#define CSIO_DEVID_T580CR_FCOE 0x5613
+
+/* Define MACRO values */
+#define CSIO_HW_T4 0x4000
+#define CSIO_T4_FCOE_ASIC 0x4600
+#define CSIO_HW_T5 0x5000
+#define CSIO_T5_FCOE_ASIC 0x5600
+#define CSIO_HW_CHIP_MASK 0xF000
+#define T4_REGMAP_SIZE (160 * 1024)
+#define T5_REGMAP_SIZE (332 * 1024)
+#define FW_FNAME_T4 "cxgb4/t4fw.bin"
+#define FW_FNAME_T5 "cxgb4/t5fw.bin"
+#define FW_CFG_NAME_T4 "cxgb4/t4-config.txt"
+#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt"
+
+/* Define static functions */
+static inline int csio_is_t4(uint16_t chip)
+{
+ return (chip == CSIO_HW_T4);
+}
+
+static inline int csio_is_t5(uint16_t chip)
+{
+ return (chip == CSIO_HW_T5);
+}
+
+/* Macro definitions */
+#define CSIO_DEVICE(devid, idx) \
+ { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
+
+#define CSIO_HW_PIDX(hw, index) \
+ (csio_is_t4(hw->chip_id) ? (PIDX(index)) : \
+ (PIDX_T5(index) | DBTYPE(1U)))
+
+#define CSIO_HW_LP_INT_THRESH(hw, val) \
+ (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) : \
+ (V_LP_INT_THRESH_T5(val)))
+
+#define CSIO_HW_M_LP_INT_THRESH(hw) \
+ (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5))
+
+#define CSIO_MAC_INT_CAUSE_REG(hw, port) \
+ (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \
+ (T5_PORT_REG(port, MAC_PORT_INT_CAUSE)))
+
+#define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0)
+#define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0)
+#define FW_VERSION_MICRO(hw) (csio_is_t4(hw->chip_id) ? 8 : 0)
+
+#define CSIO_FW_FNAME(hw) \
+ (csio_is_t4(hw->chip_id) ? FW_FNAME_T4 : FW_FNAME_T5)
+
+#define CSIO_CF_FNAME(hw) \
+ (csio_is_t4(hw->chip_id) ? FW_CFG_NAME_T4 : FW_CFG_NAME_T5)
+
+/* Declare ENUMS */
+enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
+
+enum {
+ MEMWIN_APERTURE = 2048,
+ MEMWIN_BASE = 0x1b800,
+ MEMWIN_CSIOSTOR = 6, /* PCI-e Memory Window access */
+};
+
+/* Slow path handlers */
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/* T4/T5 Chip specific ops */
+struct csio_hw;
+struct csio_hw_chip_ops {
+ int (*chip_set_mem_win)(struct csio_hw *, uint32_t);
+ void (*chip_pcie_intr_handler)(struct csio_hw *);
+ uint32_t (*chip_flash_cfg_addr)(struct csio_hw *);
+ int (*chip_mc_read)(struct csio_hw *, int, uint32_t,
+ __be32 *, uint64_t *);
+ int (*chip_edc_read)(struct csio_hw *, int, uint32_t,
+ __be32 *, uint64_t *);
+ int (*chip_memory_rw)(struct csio_hw *, u32, int, u32,
+ u32, uint32_t *, int);
+ void (*chip_dfs_create_ext_mem)(struct csio_hw *);
+};
+
+extern struct csio_hw_chip_ops t4_ops;
+extern struct csio_hw_chip_ops t5_ops;
+
+#endif /* #ifndef __CSIO_HW_CHIP_H__ */
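[Editor's note on the new header above] Everything chip-specific now funnels through the csio_hw_chip_ops table: the probe path picks t4_ops or t5_ops once from the masked device ID, and every later caller stays chip-agnostic. A sketch of the dispatch, mirroring csio_hw_init() and csio_memory_write() from this series; the wrapper functions themselves are illustrative:

    /* Sketch: probe-time selection, then chip-agnostic callers. */
    static void example_init_chip_ops(struct csio_hw *hw)
    {
            /* Chosen once, from the masked PCI device id. */
            hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;
    }

    static int example_write_adapter_mem(struct csio_hw *hw, u32 addr,
                                         u32 len, u32 *buf)
    {
            /* No chip test here: the ops table routes to T4 or T5 code. */
            return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, MEM_MC,
                                                addr, len, buf, 0);
    }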
diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c
new file mode 100644
index 00000000000..89ecbac5478
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_t4.c
@@ -0,0 +1,403 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "csio_hw.h"
+#include "csio_init.h"
+
+/*
+ * Return the specified PCI-E Configuration Space register from our Physical
+ * Function. We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+static uint32_t
+csio_t4_read_pcie_cfg4(struct csio_hw *hw, int reg)
+{
+ u32 val = 0;
+ struct csio_mb *mbp;
+ int rv;
+ struct fw_ldst_cmd *ldst_cmd;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ pci_read_config_dword(hw->pdev, reg, &val);
+ return val;
+ }
+
+ csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
+ rv = csio_mb_issue(hw, mbp);
+
+ /*
+ * If the LDST Command succeeded, extract the returned register
+ * value. Otherwise read it directly ourselves.
+ */
+ if (rv == 0) {
+ ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
+ val = ntohl(ldst_cmd->u.pcie.data[0]);
+ } else
+ pci_read_config_dword(hw->pdev, reg, &val);
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return val;
+}
+
+static int
+csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
+{
+ u32 bar0;
+ u32 mem_win_base;
+
+ /*
+ * Truncation intentional: we only read the bottom 32-bits of the
+ * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
+ * read BAR0 instead of using pci_resource_start() because we could be
+ * operating from within a Virtual Machine which is trapping our
+ * accesses to our Configuration Space and we need to set up the PCI-E
+ * Memory Window decoders with the actual addresses which will be
+ * coming across the PCI-E link.
+ */
+ bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
+ bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ mem_win_base = bar0 + MEMWIN_BASE;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+ csio_wr_reg32(hw, mem_win_base | BIR(0) |
+ WINDOW(ilog2(MEMWIN_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ return 0;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void
+csio_t4_pcie_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info sysbus_intr_info[] = {
+ { RNPP, "RXNP array parity error", -1, 1 },
+ { RPCP, "RXPC array parity error", -1, 1 },
+ { RCIP, "RXCIF array parity error", -1, 1 },
+ { RCCP, "Rx completions control array parity error", -1, 1 },
+ { RFTP, "RXFT array parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_port_intr_info[] = {
+ { TPCP, "TXPC array parity error", -1, 1 },
+ { TNPP, "TXNP array parity error", -1, 1 },
+ { TFTP, "TXFT array parity error", -1, 1 },
+ { TCAP, "TXCA array parity error", -1, 1 },
+ { TCIP, "TXCIF array parity error", -1, 1 },
+ { RCAP, "RXCA array parity error", -1, 1 },
+ { OTDD, "outbound request TLP discarded", -1, 1 },
+ { RDPE, "Rx data parity error", -1, 1 },
+ { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ static struct intr_info pcie_intr_info[] = {
+ { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
+ { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
+ { MSIDATAPERR, "MSI data parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
+ { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
+ { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
+ { MATAGPERR, "PCI MA tag parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
+ { RXWRPERR, "PCI Rx write parity error", -1, 1 },
+ { RPLPERR, "PCI replay buffer parity error", -1, 1 },
+ { PCIESINT, "PCI core secondary fault", -1, 1 },
+ { PCIEPINT, "PCI core primary fault", -1, 1 },
+ { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
+ 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+ fat = csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * csio_t4_flash_cfg_addr - return the address of the flash configuration file
+ * @hw: the HW module
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored.
+ */
+static unsigned int
+csio_t4_flash_cfg_addr(struct csio_hw *hw)
+{
+ return FLASH_CFG_OFFSET;
+}
+
+/*
+ * csio_t4_mc_read - read from MC through backdoor accesses
+ * @hw: the hw module
+ * @idx: not used for T4 adapter
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @parity is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+
+ if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
+ csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
+ csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ MC_BIST_CMD);
+ i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/*
+ * csio_t4_edc_read - read from EDC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @parity is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+
+ idx *= EDC_STRIDE;
+ if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
+ csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
+ csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
+ EDC_BIST_CMD + idx);
+ i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+/*
+ * csio_t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @win: PCI-E memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures which contain multi-byte integers, it's the caller's
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int
+csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
+ u32 len, uint32_t *buf, int dir)
+{
+ u32 pos, start, offset, memoffset, bar0;
+ u32 edc_size, mc_size, mem_reg, mem_aperture, mem_base;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- T4
+ */
+ edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1)
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
+ MA_EXT_MEMORY_BAR));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ /* Determine the PCIE_MEM_ACCESS_OFFSET */
+ addr = addr + memoffset;
+
+ /*
+ * Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ mem_aperture = 1 << (WINDOW(mem_reg) + 10);
+ mem_base = GET_PCIEOFST(mem_reg) << 10;
+
+ bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
+ bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
+ mem_base -= bar0;
+
+ start = addr & ~(mem_aperture-1);
+ offset = addr - start;
+
+ csio_dbg(hw, "csio_t4_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
+ mem_reg, mem_aperture);
+ csio_dbg(hw, "csio_t4_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
+ mem_base, memoffset);
+ csio_dbg(hw, "csio_t4_memory_rw: bar0: 0x%x, start:0x%x, offset:0x%x\n",
+ bar0, start, offset);
+ csio_dbg(hw, "csio_t4_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
+ mtype, addr, len);
+
+ for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
+ /*
+ * Move PCI-E Memory Window to our current transfer
+ * position. Read it back to ensure that changes propagate
+ * before we attempt to use the new value.
+ */
+ csio_wr_reg32(hw, pos,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+
+ while (offset < mem_aperture && len > 0) {
+ if (dir)
+ *buf++ = csio_rd_reg32(hw, mem_base + offset);
+ else
+ csio_wr_reg32(hw, *buf++, mem_base + offset);
+
+ offset += sizeof(__be32);
+ len -= sizeof(__be32);
+ }
+ }
+ return 0;
+}
+
+/*
+ * csio_t4_dfs_create_ext_mem - setup debugfs for MC to read the values
+ * @hw: the csio_hw
+ *
+ * This function creates debugfs entries for the adapter's external memory region (MC).
+ */
+static void
+csio_t4_dfs_create_ext_mem(struct csio_hw *hw)
+{
+ u32 size;
+ int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
+ if (i & EXT_MEM_ENABLE) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
+ csio_add_debugfs_mem(hw, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(size));
+ }
+}
+
+/* T4 adapter specific function */
+struct csio_hw_chip_ops t4_ops = {
+ .chip_set_mem_win = csio_t4_set_mem_win,
+ .chip_pcie_intr_handler = csio_t4_pcie_intr_handler,
+ .chip_flash_cfg_addr = csio_t4_flash_cfg_addr,
+ .chip_mc_read = csio_t4_mc_read,
+ .chip_edc_read = csio_t4_edc_read,
+ .chip_memory_rw = csio_t4_memory_rw,
+ .chip_dfs_create_ext_mem = csio_t4_dfs_create_ext_mem,
+};
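[Editor's note on csio_t4_memory_rw() above] The window arithmetic deserves a worked example. The aperture is 1 << (WINDOW(mem_reg) + 10) bytes and the window base is GET_PCIEOFST(mem_reg) << 10; assuming WINDOW(mem_reg) == 1 (an illustrative value), the aperture is 2048 bytes, so for an adapter address of 0x12345:

    /* Sketch: window-aligned split of a transfer address (values illustrative). */
    u32 mem_aperture = 1 << (WINDOW(mem_reg) + 10); /* 1 << 11 == 2048    */
    u32 start  = addr & ~(mem_aperture - 1);        /* 0x12345 -> 0x12000 */
    u32 offset = addr - start;                      /*          -> 0x345  */
    /* The copy loop then re-points the window every mem_aperture bytes. */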
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
new file mode 100644
index 00000000000..27745c170c2
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -0,0 +1,397 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "csio_hw.h"
+#include "csio_init.h"
+
+static int
+csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
+{
+ u32 mem_win_base;
+ /*
+ * Truncation intentional: we only read the bottom 32-bits of the
+ * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
+ * read BAR0 instead of using pci_resource_start() because we could be
+ * operating from within a Virtual Machine which is trapping our
+ * accesses to our Configuration Space and we need to set up the PCI-E
+ * Memory Window decoders with the actual addresses which will be
+ * coming across the PCI-E link.
+ */
+
+ /* For T5, only relative offset inside the PCIe BAR is passed */
+ mem_win_base = MEMWIN_BASE;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+ csio_wr_reg32(hw, mem_win_base | BIR(0) |
+ WINDOW(ilog2(MEMWIN_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+
+ return 0;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void
+csio_t5_pcie_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info sysbus_intr_info[] = {
+ { RNPP, "RXNP array parity error", -1, 1 },
+ { RPCP, "RXPC array parity error", -1, 1 },
+ { RCIP, "RXCIF array parity error", -1, 1 },
+ { RCCP, "Rx completions control array parity error", -1, 1 },
+ { RFTP, "RXFT array parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_port_intr_info[] = {
+ { TPCP, "TXPC array parity error", -1, 1 },
+ { TNPP, "TXNP array parity error", -1, 1 },
+ { TFTP, "TXFT array parity error", -1, 1 },
+ { TCAP, "TXCA array parity error", -1, 1 },
+ { TCIP, "TXCIF array parity error", -1, 1 },
+ { RCAP, "RXCA array parity error", -1, 1 },
+ { OTDD, "outbound request TLP discarded", -1, 1 },
+ { RDPE, "Rx data parity error", -1, 1 },
+ { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ static struct intr_info pcie_intr_info[] = {
+ { MSTGRPPERR, "Master Response Read Queue parity error",
+ -1, 1 },
+ { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ -1, 1 },
+ { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ -1, 1 },
+ { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR, "PCI DMA channel write request parity error",
+ -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ -1, 1 },
+ { IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
+ -1, 1 },
+ { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR, "Outbound read error", -1, 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+ fat = csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * csio_t5_flash_cfg_addr - return the address of the flash configuration file
+ * @hw: the HW module
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored.
+ */
+static unsigned int
+csio_t5_flash_cfg_addr(struct csio_hw *hw)
+{
+ return FLASH_CFG_START;
+}
+
+/*
+ * csio_t5_mc_read - read from MC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which MC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @parity is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+ uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
+ uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
+
+ mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx);
+ mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx);
+ mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx);
+ mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
+ mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+
+ if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
+ csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
+ csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ mc_bist_cmd_reg);
+ i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/*
+ * csio_t5_edc_read - read from EDC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @parity is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+ uint32_t edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
+ uint32_t edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
+
+/*
+ * These macros are missing from the t4_regs.h file.
+ */
+#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+
+ edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx);
+ edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
+ edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+#undef EDC_REG_T5
+#undef EDC_STRIDE_T5
+
+ if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
+ csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
+ csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ edc_bist_cmd_reg);
+ i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
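+
+/*
+ * The EDC variant is called the same way as csio_t5_mc_read() above, with
+ * @idx selecting EDC 0 or EDC 1 rather than a memory controller, e.g.
+ * csio_t5_edc_read(hw, 1, 0x2000, line, NULL) (address hypothetical).
+ */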
+
+/*
+ * csio_t5_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @win: PCI-E memory window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer: 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures with multi-byte integers, it is the caller's
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int
+csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
+ u32 len, uint32_t *buf, int dir)
+{
+ u32 pos, start, offset, memoffset;
+ u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- T4
+ * MEM_MC0 = 2 -- For T5
+ * MEM_MC1 = 3 -- For T5
+ */
+ edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1)
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
+ MA_EXT_MEMORY_BAR));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
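+
+ /*
+ * Worked example with hypothetical sizes: if each EDC is 128MB
+ * (edc_size = 128), MEM_EDC1 starts at 1 * 128MB and MEM_MC0 at
+ * 2 * 128MB; MEM_MC1 additionally skips past the mc_size MB of MC0.
+ */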
+
+ /* Determine the PCIE_MEM_ACCESS_OFFSET */
+ addr = addr + memoffset;
+
+ /*
+ * Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ mem_aperture = 1 << (WINDOW(mem_reg) + 10);
+ mem_base = GET_PCIEOFST(mem_reg) << 10;
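+
+ /*
+ * For illustration (register value hypothetical): a WINDOW() field of 2
+ * gives an aperture of 1 << (2 + 10) = 4KB, so the copy loop below
+ * advances the window in 4KB-aligned steps.
+ */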
+
+ start = addr & ~(mem_aperture-1);
+ offset = addr - start;
+ win_pf = V_PFNUM(hw->pfn);
+
+ csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
+ mem_reg, mem_aperture);
+ csio_dbg(hw, "csio_t5_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
+ mem_base, memoffset);
+ csio_dbg(hw, "csio_t5_memory_rw: start:0x%x, offset:0x%x, win_pf:%d\n",
+ start, offset, win_pf);
+ csio_dbg(hw, "csio_t5_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
+ mtype, addr, len);
+
+ for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
+ /*
+ * Move PCI-E Memory Window to our current transfer
+ * position. Read it back to ensure that changes propagate
+ * before we attempt to use the new value.
+ */
+ csio_wr_reg32(hw, pos | win_pf,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+
+ while (offset < mem_aperture && len > 0) {
+ if (dir)
+ *buf++ = csio_rd_reg32(hw, mem_base + offset);
+ else
+ csio_wr_reg32(hw, *buf++, mem_base + offset);
+
+ offset += sizeof(__be32);
+ len -= sizeof(__be32);
+ }
+ }
+ return 0;
+}
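+
+/*
+ * Usage sketch (illustrative only; window, address and length are
+ * hypothetical): read 128 bytes from EDC0 at 32-bit-aligned offset 0x1000
+ * through memory window 0 into a host buffer:
+ *
+ *	u32 buf[32];
+ *	int ret = csio_t5_memory_rw(hw, 0, MEM_EDC0, 0x1000, sizeof(buf),
+ *				    buf, 1);
+ */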
+
+/*
+ * csio_t5_dfs_create_ext_mem - set up debugfs entries for MC0 and MC1
+ * @hw: the csio_hw
+ *
+ * This function creates debugfs files for the external memory regions
+ * MC0 and MC1, when the corresponding region is enabled.
+ */
+static void
+csio_t5_dfs_create_ext_mem(struct csio_hw *hw)
+{
+ u32 size;
+ int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
+ if (i & EXT_MEM_ENABLE) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
+ csio_add_debugfs_mem(hw, "mc0", MEM_MC0,
+ EXT_MEM_SIZE_GET(size));
+ }
+ if (i & EXT_MEM1_ENABLE) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR);
+ csio_add_debugfs_mem(hw, "mc1", MEM_MC1,
+ EXT_MEM_SIZE_GET(size));
+ }
+}
+
+/* T5 adapter specific functions */
+struct csio_hw_chip_ops t5_ops = {
+ .chip_set_mem_win = csio_t5_set_mem_win,
+ .chip_pcie_intr_handler = csio_t5_pcie_intr_handler,
+ .chip_flash_cfg_addr = csio_t5_flash_cfg_addr,
+ .chip_mc_read = csio_t5_mc_read,
+ .chip_edc_read = csio_t5_edc_read,
+ .chip_memory_rw = csio_t5_memory_rw,
+ .chip_dfs_create_ext_mem = csio_t5_dfs_create_ext_mem,
+};
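+
+/*
+ * Chip-independent code reaches these handlers through hw->chip_ops, e.g.
+ * hw->chip_ops->chip_mc_read(hw, 0, pos, data, NULL), so the common paths
+ * stay free of T4/T5 conditionals.
+ */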
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 0604b5ff363..00346fe939d 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -81,9 +81,11 @@ csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
__be32 data[16];
if (mem == MEM_MC)
- ret = csio_hw_mc_read(hw, pos, data, NULL);
+ ret = hw->chip_ops->chip_mc_read(hw, 0, pos,
+ data, NULL);
else
- ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
+ ret = hw->chip_ops->chip_edc_read(hw, mem, pos,
+ data, NULL);
if (ret)
return ret;
@@ -108,7 +110,7 @@ static const struct file_operations csio_mem_debugfs_fops = {
.llseek = default_llseek,
};
-static void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
+void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
unsigned int idx, unsigned int size_mb)
{
struct dentry *de;
@@ -131,9 +133,8 @@ static int csio_setup_debugfs(struct csio_hw *hw)
csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
if (i & EDRAM1_ENABLE)
csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
- if (i & EXT_MEM_ENABLE)
- csio_add_debugfs_mem(hw, "mc", MEM_MC,
- EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
+
+ hw->chip_ops->chip_dfs_create_ext_mem(hw);
return 0;
}
@@ -1169,7 +1170,7 @@ static struct pci_error_handlers csio_err_handler = {
};
static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
- CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T440DBG FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T4 DEBUG FCOE */
CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */
CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */
CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */
@@ -1184,8 +1185,34 @@ static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */
CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */
CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_PE10K, 0), /* PE10K FCOE */
- CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0), /* PE10K FCOE on PF1 */
+ CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T4_FCOE, 0), /* AMSTERDAM T4 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_HUAWEI_T480_FCOE, 0), /* HUAWEI T480 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_FCOE, 0), /* HUAWEI T440 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_HUAWEI_STG310_FCOE, 0), /* HUAWEI STG FCOE */
+ CSIO_DEVICE(CSIO_DEVID_ACROMAG_XMC_XAUI, 0), /* ACROMAG XAUI FCOE */
+ CSIO_DEVICE(CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE, 0),/* QUANTA MEZZ FCOE */
+ CSIO_DEVICE(CSIO_DEVID_HUAWEI_10GT_FCOE, 0), /* HUAWEI 10GT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_TOE_FCOE, 0),/* HUAWEI T4 TOE FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T580DBG_FCOE, 0), /* T5 DEBUG FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520CR_FCOE, 0), /* T520CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T522CR_FCOE, 0), /* T522CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T540CR_FCOE, 0), /* T540CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520BCH_FCOE, 0), /* T520BCH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T540BCH_FCOE, 0), /* T540BCH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T540CH_FCOE, 0), /* T540CH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520SO_FCOE, 0), /* T520SO FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520CX_FCOE, 0), /* T520CX FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520BT_FCOE, 0), /* T520BT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T504BT_FCOE, 0), /* T504BT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_B520_FCOE, 0), /* B520 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_B504_FCOE, 0), /* B504 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T580CR2_FCOE, 0), /* T580 CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T540LPCR_FCOE, 0), /* T540 LP-CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T5_FCOE, 0), /* AMSTERDAM T5 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T580LPCR_FCOE, 0), /* T580 LP-CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520LLCR_FCOE, 0), /* T520 LL-CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T560CR_FCOE, 0), /* T560 CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T580CR_FCOE, 0), /* T580 CR FCOE */
{ 0, 0, 0, 0, 0, 0, 0 }
};
@@ -1259,4 +1286,5 @@ MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
-MODULE_FIRMWARE(CSIO_FW_FNAME);
+MODULE_FIRMWARE(FW_FNAME_T4);
+MODULE_FIRMWARE(FW_FNAME_T5);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 0838fd7ec9c..5cc5d317a44 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -52,31 +52,6 @@
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0"
-#define CSIO_DEVICE(devid, idx) \
-{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
-
-#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\
- ((_dev) == CSIO_DEVID_PE10K_PF1))
-
-/* FCoE device IDs */
-#define CSIO_DEVID_PE10K 0xA000
-#define CSIO_DEVID_PE10K_PF1 0xA001
-#define CSIO_DEVID_T440DBG_FCOE 0x4600
-#define CSIO_DEVID_T420CR_FCOE 0x4601
-#define CSIO_DEVID_T422CR_FCOE 0x4602
-#define CSIO_DEVID_T440CR_FCOE 0x4603
-#define CSIO_DEVID_T420BCH_FCOE 0x4604
-#define CSIO_DEVID_T440BCH_FCOE 0x4605
-#define CSIO_DEVID_T440CH_FCOE 0x4606
-#define CSIO_DEVID_T420SO_FCOE 0x4607
-#define CSIO_DEVID_T420CX_FCOE 0x4608
-#define CSIO_DEVID_T420BT_FCOE 0x4609
-#define CSIO_DEVID_T404BT_FCOE 0x460A
-#define CSIO_DEVID_B420_FCOE 0x460B
-#define CSIO_DEVID_B404_FCOE 0x460C
-#define CSIO_DEVID_T480CR_FCOE 0x460D
-#define CSIO_DEVID_T440LPCR_FCOE 0x460E
-
extern struct fc_function_template csio_fc_transport_funcs;
extern struct fc_function_template csio_fc_transport_vport_funcs;
@@ -100,6 +75,10 @@ struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
void csio_shost_exit(struct csio_lnode *);
void csio_lnodes_exit(struct csio_hw *, bool);
+/* DebugFS helper routines */
+void csio_add_debugfs_mem(struct csio_hw *, const char *,
+ unsigned int, unsigned int);
+
static inline struct Scsi_Host *
csio_ln_to_shost(struct csio_lnode *ln)
{
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
index 8d84988ab06..0f9c04175b1 100644
--- a/drivers/scsi/csiostor/csio_lnode.h
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -114,7 +114,7 @@ struct csio_lnode_stats {
uint32_t n_rnode_match; /* matched rnode */
uint32_t n_dev_loss_tmo; /* Device loss timeout */
uint32_t n_fdmi_err; /* fdmi err */
- uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
+ uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */
enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
uint32_t n_rnode_alloc; /* rnode allocated */
uint32_t n_rnode_free; /* rnode freed */
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
index 51c6a388de2..e9c3b045f58 100644
--- a/drivers/scsi/csiostor/csio_rnode.c
+++ b/drivers/scsi/csiostor/csio_rnode.c
@@ -302,7 +302,7 @@ csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
{
uint8_t rport_type;
struct csio_rnode *rn, *match_rn;
- uint32_t vnp_flowid;
+ uint32_t vnp_flowid = 0;
__be32 *port_id;
port_id = (__be32 *)&rdevp->r_id[0];
@@ -350,6 +350,14 @@ csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
* Else, go ahead and alloc a new rnode.
*/
if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
+ if (rn == match_rn)
+ goto found_rnode;
+ csio_ln_dbg(ln,
+ "nport_id:x%x and wwpn:%llx"
+ " match for ssni:x%x\n",
+ rn->nport_id,
+ wwn_to_u64(rdevp->wwpn),
+ rdev_flowid);
if (csio_is_rnode_ready(rn)) {
csio_ln_warn(ln,
"rnode is already"
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
index a3b434c801d..65940096a80 100644
--- a/drivers/scsi/csiostor/csio_rnode.h
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -63,7 +63,7 @@ struct csio_rnode_stats {
uint32_t n_err_nomem; /* error nomem */
uint32_t n_evt_unexp; /* unexpected event */
uint32_t n_evt_drop; /* unexpected event */
- uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
+ uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */
enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
uint32_t n_lun_rst; /* Number of resets of
* of LUNs under this
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index c32df1bdaa9..4255ce264ab 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -85,8 +85,8 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
*/
if (flq->inc_idx >= 8) {
csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
- PIDX(flq->inc_idx / 8),
- MYPF_REG(SGE_PF_KDOORBELL));
+ CSIO_HW_PIDX(hw, flq->inc_idx / 8),
+ MYPF_REG(SGE_PF_KDOORBELL));
flq->inc_idx &= 7;
}
}
@@ -989,7 +989,8 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
wmb();
/* Ring SGE Doorbell writing q->pidx into it */
csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
- PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL));
+ CSIO_HW_PIDX(hw, q->inc_idx),
+ MYPF_REG(SGE_PF_KDOORBELL));
q->inc_idx = 0;
return 0;
@@ -1331,20 +1332,30 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
/* FL BUFFER SIZE#0 is Page size, i.e., already aligned to cache line */
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
- csio_wr_reg32(hw,
- (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
- sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
- SGE_FL_BUFFER_SIZE2);
- csio_wr_reg32(hw,
- (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
- sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
- SGE_FL_BUFFER_SIZE3);
+
+ /*
+ * If using hard params, the following will get set correctly
+ * in csio_wr_set_sge().
+ */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE2);
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE3);
+ }
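+
+ /*
+ * The (v + align - 1) & ~(align - 1) expressions above round each
+ * buffer size up to the next multiple of csio_fl_align, assuming the
+ * alignment is a power of two: e.g. with align = 64, a size of 100
+ * rounds up to 128.
+ */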
csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
/* default value of rx_dma_offset of the NIC driver */
csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
+
+ csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
+ CSUM_HAS_PSEUDO_HDR, 0);
}
static void
@@ -1460,18 +1471,21 @@ csio_wr_set_sge(struct csio_hw *hw)
* and generate an interrupt when this occurs so we can recover.
*/
csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
- HP_INT_THRESH(HP_INT_THRESH_MASK) |
- LP_INT_THRESH(LP_INT_THRESH_MASK),
- HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
- LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH));
+ HP_INT_THRESH(HP_INT_THRESH_MASK) |
+ CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)),
+ HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
+ CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH));
+
csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
ENABLE_DROP);
/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
- CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2);
- CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3);
+ csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
+ & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2);
+ csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
+ & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3);
CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
@@ -1522,22 +1536,24 @@ void
csio_wr_sge_init(struct csio_hw *hw)
{
/*
- * If we are master:
+ * If we are master and chip is not initialized:
* - If we plan to use the config file, we need to fixup some
* host specific registers, and read the rest of the SGE
* configuration.
* - If we don't plan to use the config file, we need to initialize
* SGE entirely, including fixing the host specific registers.
+ * If we are master and chip is initialized, just read and work off of
+ * the already initialized SGE values.
* If we aren't the master, we are only allowed to read and work off of
* the already initialized SGE values.
*
* Therefore, before calling this function, we assume that the master-
- * ship of the card, and whether to use config file or not, have
- * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and
- * CSIO_HWF_MASTER should be set/unset.
+ * ship of the card, the device state, and whether to use the config file have
+ * already been decided.
*/
if (csio_is_hw_master(hw)) {
- csio_wr_fixup_host_params(hw);
+ if (hw->fw_state != CSIO_DEV_STATE_INIT)
+ csio_wr_fixup_host_params(hw);
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
csio_wr_get_sge(hw);