author    Linus Torvalds <torvalds@linux-foundation.org>  2015-02-10 20:01:30 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-10 20:01:30 -0800
commit    c5ce28df0e7c01a1de23c36ebdefcd803f2b6cbb (patch)
tree      9830baf38832769e1cf621708889111bbe3c93df /drivers/infiniband/hw
parent    29afc4e9a408f2304e09c6dd0dbcfbd2356d0faa (diff)
parent    9399f0c51489ae8c16d6559b82a452fdc1895e91 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) More iov_iter conversion work from Al Viro.

    [ The "crypto: switch af_alg_make_sg() to iov_iter" commit was wrong,
      and this pull actually adds an extra commit on top of the branch
      I'm pulling to fix that up, so that the pre-merge state is ok. - Linus ]

 2) Various optimizations to the ipv4 forwarding information base trie
    lookup implementation.  From Alexander Duyck.

 3) Remove sock_iocb altogether, from Christoph Hellwig.

 4) Allow congestion control algorithm selection via routing metrics.
    From Daniel Borkmann.

 5) Make ipv4 uncached route list per-cpu, from Eric Dumazet.

 6) Handle rfs hash collisions more gracefully, also from Eric Dumazet.

 7) Add xmit_more support to r8169, e1000, and e1000e drivers.  From
    Florian Westphal.

 8) Transparent Ethernet Bridging support for GRO, from Jesse Gross.

 9) Add BPF packet actions to packet scheduler, from Jiri Pirko.

10) Add support for unique flow IDs to openvswitch, from Joe Stringer.

11) New NetCP ethernet driver, from Muralidharan Karicheri and Wingman
    Kwok.

12) More sanely handle out-of-window dupacks, which can result in
    serious ACK storms.  From Neal Cardwell.

13) Various rhashtable bug fixes and enhancements, from Herbert Xu,
    Patrick McHardy, and Thomas Graf.

14) Support xmit_more in be2net, from Sathya Perla.

15) Group Policy extensions for vxlan, from Thomas Graf.

16) Remove Checksum Offload support for vxlan, from Tom Herbert.

17) Like ipv4, support lockless transmit over ipv6 UDP sockets.  From
    Vlad Yasevich.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1494+1 commits)
  crypto: fix af_alg_make_sg() conversion to iov_iter
  ipv4: Namespecify TCP PMTU mechanism
  i40e: Fix for stats init function call in Rx setup
  tcp: don't include Fast Open option in SYN-ACK on pure SYN-data
  openvswitch: Only set TUNNEL_VXLAN_OPT if VXLAN-GBP metadata is set
  ipv6: Make __ipv6_select_ident static
  ipv6: Fix fragment id assignment on LE arches.
  bridge: Fix inability to add non-vlan fdb entry
  net: Mellanox: Delete unnecessary checks before the function call "vunmap"
  cxgb4: Add support in cxgb4 to get expansion rom version via ethtool
  ethtool: rename reserved1 memeber in ethtool_drvinfo for expansion ROM version
  net: dsa: Remove redundant phy_attach()
  IB/mlx4: Reset flow support for IB kernel ULPs
  IB/mlx4: Always use the correct port for mirrored multicast attachments
  net/bonding: Fix potential bad memory access during bonding events
  tipc: remove tipc_snprintf
  tipc: nl compat add noop and remove legacy nl framework
  tipc: convert legacy nl stats show to nl compat
  tipc: convert legacy nl net id get to nl compat
  tipc: convert legacy nl net id set to nl compat
  ...
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c           | 118
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c           |  60
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c       |  12
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c           |  12
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c          |  22
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c           |  62
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h           | 126
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h  | 812
-rw-r--r--  drivers/infiniband/hw/mlx4/ah.c            |   1
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c    |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c            |  57
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c           |   3
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c          | 246
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h       |  26
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c            |   6
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c            |  90
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c           |   8
-rw-r--r--  drivers/infiniband/hw/mlx4/sysfs.c         |   6
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c           |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c        |  13
20 files changed, 1015 insertions(+), 669 deletions(-)
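
Every cxgb4 hunk below is one mechanical cleanup: the old prefix-style bitfield macros (S_FOO, M_FOO, V_FOO(x), G_FOO(x), F_FOO) are renamed to the suffix style preferred in the kernel tree (FOO_S, FOO_M, FOO_V(x), FOO_G(x), FOO_F); values and semantics are unchanged. A minimal sketch of the macro family, using a hypothetical field named EXAMPLE (the shift and mask are illustrative, not taken from the hardware):

    #define EXAMPLE_S    5                                  /* bit offset of the field        */
    #define EXAMPLE_M    0x1f                               /* field mask, post-shift         */
    #define EXAMPLE_V(x) ((x) << EXAMPLE_S)                 /* pack a value into position     */
    #define EXAMPLE_G(x) (((x) >> EXAMPLE_S) & EXAMPLE_M)   /* extract a value                */
    #define EXAMPLE_F    EXAMPLE_V(1U)                      /* the set flag, for 1-bit fields */

For any v, EXAMPLE_G(EXAMPLE_V(v)) == (v & EXAMPLE_M), which is why the call sites below can swap the old and new names one-for-one.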
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 9edc200b311..57176ddd4c5 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -235,19 +235,19 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
- ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
+ ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
((AF_INET == ep->com.remote_addr.ss_family) ?
sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
sizeof(struct tcphdr);
ep->mss = ep->emss;
- if (GET_TCPOPT_TSTAMP(opt))
+ if (TCPOPT_TSTAMP_G(opt))
ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
- PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
ep->mss, ep->emss);
}
@@ -652,29 +652,29 @@ static int send_connect(struct c4iw_ep *ep)
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
- opt0 = (nocong ? NO_CONG(1) : 0) |
+ opt0 = (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
- DELACK(1) |
+ DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP(ep->tos) |
+ DSCP_V(ep->tos) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
- CCTRL_ECN(enable_ecn) |
+ CCTRL_ECN_V(enable_ecn) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps)
- opt2 |= TSTAMPS_EN(1);
+ opt2 |= TSTAMPS_EN_F;
if (enable_tcp_sack)
- opt2 |= SACK_EN(1);
+ opt2 |= SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN_F;
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
opt2 |= T5_OPT_2_VALID_F;
- opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
}
t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
@@ -1042,7 +1042,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_ep *ep;
struct cpl_act_establish *req = cplhdr(skb);
unsigned int tid = GET_TID(req);
- unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+ unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct tid_info *t = dev->rdev.lldi.tids;
ep = lookup_atid(t, atid);
@@ -1258,8 +1258,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
ep->hwtid));
req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
- F_RX_DACK_CHANGE |
- V_RX_DACK_MODE(dack_mode));
+ RX_DACK_CHANGE_F |
+ RX_DACK_MODE_V(dack_mode));
set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
c4iw_ofld_send(&ep->com.dev->rdev, skb);
return credits;
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+ req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
@@ -1782,27 +1782,27 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
- req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
- (nocong ? NO_CONG(1) : 0) |
+ req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
+ (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
- DELACK(1) |
+ DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP(ep->tos) |
+ DSCP_V(ep->tos) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win));
- req->tcb.opt2 = (__force __be32) (PACE(1) |
- TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+ req->tcb.opt2 = (__force __be32) (PACE_V(1) |
+ TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
RX_CHANNEL_V(0) |
- CCTRL_ECN(enable_ecn) |
+ CCTRL_ECN_V(enable_ecn) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
if (enable_tcp_timestamps)
- req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
+ req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
if (enable_tcp_sack)
- req->tcb.opt2 |= (__force __be32)SACK_EN(1);
+ req->tcb.opt2 |= (__force __be32)SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
@@ -2023,10 +2023,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_act_open_rpl *rpl = cplhdr(skb);
- unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
- ntohl(rpl->atid_status)));
+ unsigned int atid = TID_TID_G(AOPEN_ATID_G(
+ ntohl(rpl->atid_status)));
struct tid_info *t = dev->rdev.lldi.tids;
- int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+ int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
struct sockaddr_in *la;
struct sockaddr_in *ra;
struct sockaddr_in6 *la6;
@@ -2064,7 +2064,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (ep->com.local_addr.ss_family == AF_INET &&
dev->rdev.lldi.enable_fw_ofld_conn) {
send_fw_act_open_req(ep,
- GET_TID_TID(GET_AOPEN_ATID(
+ TID_TID_G(AOPEN_ATID_G(
ntohl(rpl->atid_status))));
return 0;
}
@@ -2181,39 +2181,39 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
win = ep->rcv_win >> 10;
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
- opt0 = (nocong ? NO_CONG(1) : 0) |
+ opt0 = (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
- DELACK(1) |
+ DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP(ep->tos >> 2) |
+ DSCP_V(ep->tos >> 2) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps && req->tcpopt.tstamp)
- opt2 |= TSTAMPS_EN(1);
+ opt2 |= TSTAMPS_EN_F;
if (enable_tcp_sack && req->tcpopt.sack)
- opt2 |= SACK_EN(1);
+ opt2 |= SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN_F;
if (enable_ecn) {
const struct tcphdr *tcph;
u32 hlen = ntohl(req->hdr_len);
- tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
- G_IP_HDR_LEN(hlen);
+ tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
+ IP_HDR_LEN_G(hlen);
if (tcph->ece && tcph->cwr)
- opt2 |= CCTRL_ECN(1);
+ opt2 |= CCTRL_ECN_V(1);
}
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
u32 isn = (prandom_u32() & ~7UL) - 1;
opt2 |= T5_OPT_2_VALID_F;
- opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
rpl5 = (void *)rpl;
memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
@@ -2245,8 +2245,8 @@ static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
__u8 *local_ip, __u8 *peer_ip,
__be16 *local_port, __be16 *peer_port)
{
- int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
- int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
+ int eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ int ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
struct tcphdr *tcp = (struct tcphdr *)
@@ -2277,7 +2277,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep = NULL, *parent_ep;
struct cpl_pass_accept_req *req = cplhdr(skb);
- unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
+ unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
struct dst_entry *dst;
@@ -2310,14 +2310,14 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
ntohs(peer_port), peer_mss);
dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
local_port, peer_port,
- GET_POPEN_TOS(ntohl(req->tos_stid)));
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
} else {
PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
, __func__, parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
- PASS_OPEN_TOS(ntohl(req->tos_stid)),
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
((struct sockaddr_in6 *)
&parent_ep->com.local_addr)->sin6_scope_id);
}
@@ -2375,7 +2375,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
}
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
- child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
+ child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
child_ep->dst = dst;
child_ep->hwtid = hwtid;
@@ -3500,24 +3500,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
- V_SYN_MAC_IDX(G_RX_MACIDX(
+ req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
+ SYN_MAC_IDX_V(RX_MACIDX_G(
(__force int) htonl(l2info))) |
- F_SYN_XACT_MATCH);
+ SYN_XACT_MATCH_F);
eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
- G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
- G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
- req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+ RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
+ RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
+ req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(
(__force int) htonl(l2info))) |
- V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+ TCP_HDR_LEN_V(RX_TCPHDR_LEN_G(
(__force int) htons(hdr_len))) |
- V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+ IP_HDR_LEN_V(RX_IPHDR_LEN_G(
(__force int) htons(hdr_len))) |
- V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
+ ETH_HDR_LEN_V(RX_ETHHDR_LEN_G(eth_hdr_len)));
req->vlan = (__force __be16) vlantag;
req->len = (__force __be16) len;
- req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
- PASS_OPEN_TOS(tos));
+ req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
+ PASS_OPEN_TOS_V(tos));
req->tcpopt.mss = htons(tmp_opt.mss_clamp);
if (tmp_opt.wscale_ok)
req->tcpopt.wsf = tmp_opt.snd_wscale;
@@ -3542,7 +3542,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+ req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
req->le.filter = (__force __be32) filter;
@@ -3556,7 +3556,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
FW_OFLD_CONNECTION_WR_ASTID_V(
- GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+ PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
/*
* We store the qid in opt2 which will be used by the firmware
@@ -3613,7 +3613,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
struct neighbour *neigh;
/* Drop all non-SYN packets */
- if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+ if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
goto reject;
/*
@@ -3635,8 +3635,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
}
eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
- G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
- G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+ RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
+ RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
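
One invariant worth noting across the cm.c hunks: the _G getters and _V setters always operate on host-order words, so every extraction is wrapped in ntohl()/ntohs() and every packed word is converted back with cpu_to_be32()/htonl() at the wire boundary. A two-line sketch of that shape, reusing names from the hunks above:

    unsigned int atid = TID_TID_G(ntohl(req->tos_atid));         /* wire -> host, then extract */
    req->credit_dack  = cpu_to_be32(credits | RX_DACK_CHANGE_F |
                                    RX_DACK_MODE_V(dack_mode));  /* pack, then host -> wire    */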
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e9fd3a02929..ab7692ac204 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -52,7 +52,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
- V_FW_RI_RES_WR_NRES(1) |
+ FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
@@ -122,7 +122,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
- V_FW_RI_RES_WR_NRES(1) |
+ FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
@@ -131,17 +131,17 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
res->u.cq.op = FW_RI_RES_OP_WRITE;
res->u.cq.iqid = cpu_to_be32(cq->cqid);
res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
- V_FW_RI_RES_WR_IQANUS(0) |
- V_FW_RI_RES_WR_IQANUD(1) |
- F_FW_RI_RES_WR_IQANDST |
- V_FW_RI_RES_WR_IQANDSTINDEX(
+ FW_RI_RES_WR_IQANUS_V(0) |
+ FW_RI_RES_WR_IQANUD_V(1) |
+ FW_RI_RES_WR_IQANDST_F |
+ FW_RI_RES_WR_IQANDSTINDEX_V(
rdev->lldi.ciq_ids[cq->vector]));
res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
- F_FW_RI_RES_WR_IQDROPRSS |
- V_FW_RI_RES_WR_IQPCIECH(2) |
- V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
- F_FW_RI_RES_WR_IQO |
- V_FW_RI_RES_WR_IQESIZE(1));
+ FW_RI_RES_WR_IQDROPRSS_F |
+ FW_RI_RES_WR_IQPCIECH_V(2) |
+ FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
+ FW_RI_RES_WR_IQO_F |
+ FW_RI_RES_WR_IQESIZE_V(1));
res->u.cq.iqsize = cpu_to_be16(cq->size);
res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
@@ -182,12 +182,12 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
- V_CQE_OPCODE(FW_RI_SEND) |
- V_CQE_TYPE(0) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->sq.qid));
- cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(FW_RI_SEND) |
+ CQE_TYPE_V(0) |
+ CQE_SWCQE_V(1) |
+ CQE_QPID_V(wq->sq.qid));
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cq->sw_queue[cq->sw_pidx] = cqe;
t4_swcq_produce(cq);
}
@@ -215,13 +215,13 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
- V_CQE_OPCODE(swcqe->opcode) |
- V_CQE_TYPE(1) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->sq.qid));
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(swcqe->opcode) |
+ CQE_TYPE_V(1) |
+ CQE_SWCQE_V(1) |
+ CQE_QPID_V(wq->sq.qid));
CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
- cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cq->sw_queue[cq->sw_pidx] = cqe;
t4_swcq_produce(cq);
}
@@ -284,7 +284,7 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
*/
PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
__func__, cidx, cq->sw_pidx);
- swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+ swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
t4_swcq_produce(cq);
swsqe->flushed = 1;
@@ -301,10 +301,10 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
{
read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
read_cqe->len = htonl(wq->sq.oldest_read->read_len);
- read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
- V_CQE_SWCQE(SW_CQE(hw_cqe)) |
- V_CQE_OPCODE(FW_RI_READ_REQ) |
- V_CQE_TYPE(1));
+ read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
+ CQE_SWCQE_V(SW_CQE(hw_cqe)) |
+ CQE_OPCODE_V(FW_RI_READ_REQ) |
+ CQE_TYPE_V(1));
read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}
@@ -400,7 +400,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
} else {
swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
*swcqe = *hw_cqe;
- swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+ swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
t4_swcq_produce(&chp->cq);
}
next_cqe:
@@ -576,7 +576,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
}
if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
t4_set_wq_in_error(wq);
- hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
+ hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
goto proc_cqe;
}
goto proc_cqe;
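
The cq.c hunks keep the explicit cast in CQE_GENBIT_V((u64)cq->gen), and it matters: CQE_GENBIT_S is 63, so the shift must happen in a 64-bit type. A sketch of the distinction, assuming gen is a narrow integer as in struct t4_cq:

    u64 bits = CQE_GENBIT_V((u64)cq->gen);  /* 64-bit shift by 63: well defined */
    /* CQE_GENBIT_V(cq->gen) without the cast would shift a 32-bit int by 63, */
    /* which is undefined behavior in C -- the widening cast is load-bearing. */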
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index eb5df4e6270..aafdbcd84fc 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -380,12 +380,12 @@ static int dump_stag(int id, void *p, void *data)
"stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
"perm 0x%x ps %d len 0x%llx va 0x%llx\n",
(u32)id<<8,
- G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
- G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+ FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
+ FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
if (cc < space)
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index c9df0549f51..794555dc86a 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -50,12 +50,12 @@ static void print_tpte(struct c4iw_dev *dev, u32 stag)
PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
"perm 0x%x ps %d len 0x%llx va 0x%llx\n",
stag & 0xffffff00,
- G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
- G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
- G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+ FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
+ FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
}
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index cb43c2299ac..6791fd16272 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -86,14 +86,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
- req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+ req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
sgl = (struct ulptx_sgl *)(req + 1);
sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
- ULPTX_NSGE(1));
+ ULPTX_NSGE_V(1));
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
@@ -286,17 +286,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
if (reset_tpt_entry)
memset(&tpt, 0, sizeof(tpt));
else {
- tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
- V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
- V_FW_RI_TPTE_STAGSTATE(stag_state) |
- V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
- tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
- (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
- V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
+ tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+ FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
+ FW_RI_TPTE_STAGSTATE_V(stag_state) |
+ FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
+ tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+ (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
+ FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
FW_RI_VA_BASED_TO))|
- V_FW_RI_TPTE_PS(page_size));
+ FW_RI_TPTE_PS_V(page_size));
tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
- V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
+ FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
tpt.va_hi = cpu_to_be32((u32)(to >> 32));
tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
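
write_tpt_entry() above packs the TPT entry with the _V/_F helpers, and the debugfs dumpers in device.c and ev.c read the same words back with the _G getters. A small round-trip sketch with the real macro names (key and pdid are arbitrary illustrative values, not driver state):

    u32 w = FW_RI_TPTE_VALID_F |
            FW_RI_TPTE_STAGKEY_V(key & FW_RI_TPTE_STAGKEY_M) |
            FW_RI_TPTE_PDID_V(pdid & FW_RI_TPTE_PDID_M);
    /* the getters recover exactly what was packed: */
    WARN_ON(FW_RI_TPTE_STAGKEY_G(w) != (key & FW_RI_TPTE_STAGKEY_M));
    WARN_ON(FW_RI_TPTE_VALID_G(w)   != 1);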
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bb85d479e66..15cae5a3101 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -272,7 +272,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
- V_FW_RI_RES_WR_NRES(2) |
+ FW_RI_RES_WR_NRES_V(2) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
@@ -287,19 +287,19 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
rdev->hw_queue.t4_eq_status_entries;
res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
- V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
- V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
- V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
- (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
- V_FW_RI_RES_WR_IQID(scq->cqid));
+ FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
+ FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
+ FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
+ (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
+ FW_RI_RES_WR_IQID_V(scq->cqid));
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
- V_FW_RI_RES_WR_DCAEN(0) |
- V_FW_RI_RES_WR_DCACPU(0) |
- V_FW_RI_RES_WR_FBMIN(2) |
- V_FW_RI_RES_WR_FBMAX(2) |
- V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
- V_FW_RI_RES_WR_CIDXFTHRESH(0) |
- V_FW_RI_RES_WR_EQSIZE(eqsize));
+ FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+ FW_RI_RES_WR_FBMAX_V(2) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
res++;
@@ -312,18 +312,18 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
rdev->hw_queue.t4_eq_status_entries;
res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
- V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
- V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
- V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
- V_FW_RI_RES_WR_IQID(rcq->cqid));
+ FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
+ FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
+ FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
+ FW_RI_RES_WR_IQID_V(rcq->cqid));
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
- V_FW_RI_RES_WR_DCAEN(0) |
- V_FW_RI_RES_WR_DCACPU(0) |
- V_FW_RI_RES_WR_FBMIN(2) |
- V_FW_RI_RES_WR_FBMAX(2) |
- V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
- V_FW_RI_RES_WR_CIDXFTHRESH(0) |
- V_FW_RI_RES_WR_EQSIZE(eqsize));
+ FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+ FW_RI_RES_WR_FBMAX_V(2) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
@@ -444,19 +444,19 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
case IB_WR_SEND:
if (wr->send_flags & IB_SEND_SOLICITED)
wqe->send.sendop_pkd = cpu_to_be32(
- V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
+ FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
else
wqe->send.sendop_pkd = cpu_to_be32(
- V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
+ FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
wqe->send.stag_inv = 0;
break;
case IB_WR_SEND_WITH_INV:
if (wr->send_flags & IB_SEND_SOLICITED)
wqe->send.sendop_pkd = cpu_to_be32(
- V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
+ FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
else
wqe->send.sendop_pkd = cpu_to_be32(
- V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
+ FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
break;
@@ -1283,8 +1283,8 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
wqe->u.init.type = FW_RI_TYPE_INIT;
wqe->u.init.mpareqbit_p2ptype =
- V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
- V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
+ FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
+ FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
if (qhp->attr.mpa_attr.recv_marker_enabled)
wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
@@ -1776,7 +1776,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (mm5) {
mm5->key = uresp.ma_sync_key;
mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
- + A_PCIE_MA_SYNC) & PAGE_MASK;
+ + PCIE_MA_SYNC_A) & PAGE_MASK;
mm5->len = PAGE_SIZE;
insert_mmap(ucontext, mm5);
}
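
A small readability win visible in the qp.c hunk: single-bit fields are now OR'd in as plain _F constants rather than function-like V_FOO(1) calls, so conditional flags compose naturally, e.g. (sketch, with fetchszm as a stand-in local):

    fetchszm |= t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0;  /* was F_FW_RI_RES_WR_ONCHIP */

The register-address rename at the end of the hunk follows the same convention: A_PCIE_MA_SYNC becomes PCIE_MA_SYNC_A with the offset 0x30b4 unchanged (see the t4.h hunk below).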
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index c04e5134b30..871cdcac7be 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -41,7 +41,7 @@
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
-#define A_PCIE_MA_SYNC 0x30b4
+#define PCIE_MA_SYNC_A 0x30b4
struct t4_status_page {
__be32 rsvd1; /* flit 0 - hw owns */
@@ -184,44 +184,44 @@ struct t4_cqe {
/* macros for flit 0 of the cqe */
-#define S_CQE_QPID 12
-#define M_CQE_QPID 0xFFFFF
-#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
-
-#define S_CQE_SWCQE 11
-#define M_CQE_SWCQE 0x1
-#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
-
-#define S_CQE_STATUS 5
-#define M_CQE_STATUS 0x1F
-#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
-
-#define S_CQE_TYPE 4
-#define M_CQE_TYPE 0x1
-#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
-
-#define S_CQE_OPCODE 0
-#define M_CQE_OPCODE 0xF
-#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
-
-#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
-#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
-#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define CQE_QPID_S 12
+#define CQE_QPID_M 0xFFFFF
+#define CQE_QPID_G(x) ((((x) >> CQE_QPID_S)) & CQE_QPID_M)
+#define CQE_QPID_V(x) ((x)<<CQE_QPID_S)
+
+#define CQE_SWCQE_S 11
+#define CQE_SWCQE_M 0x1
+#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
+#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S)
+
+#define CQE_STATUS_S 5
+#define CQE_STATUS_M 0x1F
+#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
+#define CQE_STATUS_V(x) ((x)<<CQE_STATUS_S)
+
+#define CQE_TYPE_S 4
+#define CQE_TYPE_M 0x1
+#define CQE_TYPE_G(x) ((((x) >> CQE_TYPE_S)) & CQE_TYPE_M)
+#define CQE_TYPE_V(x) ((x)<<CQE_TYPE_S)
+
+#define CQE_OPCODE_S 0
+#define CQE_OPCODE_M 0xF
+#define CQE_OPCODE_G(x) ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
+#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S)
+
+#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
-#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
-#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
+#define CQE_STATUS(x) (CQE_STATUS_G(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x) (CQE_OPCODE_G(be32_to_cpu((x)->header)))
#define CQE_SEND_OPCODE(x)( \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
#define CQE_LEN(x) (be32_to_cpu((x)->len))
@@ -237,25 +237,25 @@ struct t4_cqe {
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
/* macros for flit 3 of the cqe */
-#define S_CQE_GENBIT 63
-#define M_CQE_GENBIT 0x1
-#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
+#define CQE_GENBIT_S 63
+#define CQE_GENBIT_M 0x1
+#define CQE_GENBIT_G(x) (((x) >> CQE_GENBIT_S) & CQE_GENBIT_M)
+#define CQE_GENBIT_V(x) ((x)<<CQE_GENBIT_S)
-#define S_CQE_OVFBIT 62
-#define M_CQE_OVFBIT 0x1
-#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
+#define CQE_OVFBIT_S 62
+#define CQE_OVFBIT_M 0x1
+#define CQE_OVFBIT_G(x) ((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M)
-#define S_CQE_IQTYPE 60
-#define M_CQE_IQTYPE 0x3
-#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
+#define CQE_IQTYPE_S 60
+#define CQE_IQTYPE_M 0x3
+#define CQE_IQTYPE_G(x) ((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M)
-#define M_CQE_TS 0x0fffffffffffffffULL
-#define G_CQE_TS(x) ((x) & M_CQE_TS)
+#define CQE_TS_M 0x0fffffffffffffffULL
+#define CQE_TS_G(x) ((x) & CQE_TS_M)
-#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_OVFBIT(x) ((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x) ((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x) (CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))
struct t4_swsqe {
u64 wr_id;
@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else {
PDBG("%s: DB wq->sq.pidx = %d\n",
__func__, wq->sq.pidx);
- writel(PIDX_T5(inc), wq->sq.udb);
+ writel(PIDX_T5_V(inc), wq->sq.udb);
}
/* Flush user doorbell area writes. */
wmb();
return;
}
- writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+ writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
}
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else {
PDBG("%s: DB wq->rq.pidx = %d\n",
__func__, wq->rq.pidx);
- writel(PIDX_T5(inc), wq->rq.udb);
+ writel(PIDX_T5_V(inc), wq->rq.udb);
}
/* Flush user doorbell area writes. */
wmb();
return;
}
- writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+ writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
}
static inline int t4_wq_in_error(struct t4_wq *wq)
@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
u32 val;
set_bit(CQ_ARMED, &cq->flags);
- while (cq->cidx_inc > CIDXINC_MASK) {
- val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
- INGRESSQID(cq->cqid);
+ while (cq->cidx_inc > CIDXINC_M) {
+ val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
+ INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
- cq->cidx_inc -= CIDXINC_MASK;
+ cq->cidx_inc -= CIDXINC_M;
}
- val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
- INGRESSQID(cq->cqid);
+ val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
+ INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc = 0;
return 0;
@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
- if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
+ if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
u32 val;
- val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
- INGRESSQID(cq->cqid);
+ val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
+ INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc = 0;
}
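
t4_arm_cq() and t4_hwcq_consume() above cap the accumulated index credit at CIDXINC_M because CIDXINC is a fixed-width field in the GTS doorbell word; a larger backlog is returned in CIDXINC_M-sized doorbell writes first. A condensed sketch of that flush loop (mirroring t4_arm_cq, with the doorbell field macros assumed to come from the adapter register headers):

    while (cq->cidx_inc > CIDXINC_M) {             /* more credit than the field holds */
            val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) |
                  TIMERREG_V(7) | INGRESSQID_V(cq->cqid);
            writel(val, cq->gts);                  /* partial credit-return doorbell   */
            cq->cidx_inc -= CIDXINC_M;
    }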
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 5709e77faf7..5e53327fc64 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -162,102 +162,102 @@ struct fw_ri_tpte {
__be32 len_hi;
};
-#define S_FW_RI_TPTE_VALID 31
-#define M_FW_RI_TPTE_VALID 0x1
-#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
-#define G_FW_RI_TPTE_VALID(x) \
- (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
-#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
-
-#define S_FW_RI_TPTE_STAGKEY 23
-#define M_FW_RI_TPTE_STAGKEY 0xff
-#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
-#define G_FW_RI_TPTE_STAGKEY(x) \
- (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
-
-#define S_FW_RI_TPTE_STAGSTATE 22
-#define M_FW_RI_TPTE_STAGSTATE 0x1
-#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
-#define G_FW_RI_TPTE_STAGSTATE(x) \
- (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
-#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
-
-#define S_FW_RI_TPTE_STAGTYPE 20
-#define M_FW_RI_TPTE_STAGTYPE 0x3
-#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
-#define G_FW_RI_TPTE_STAGTYPE(x) \
- (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
-
-#define S_FW_RI_TPTE_PDID 0
-#define M_FW_RI_TPTE_PDID 0xfffff
-#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
-#define G_FW_RI_TPTE_PDID(x) \
- (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
-
-#define S_FW_RI_TPTE_PERM 28
-#define M_FW_RI_TPTE_PERM 0xf
-#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
-#define G_FW_RI_TPTE_PERM(x) \
- (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
-
-#define S_FW_RI_TPTE_REMINVDIS 27
-#define M_FW_RI_TPTE_REMINVDIS 0x1
-#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
-#define G_FW_RI_TPTE_REMINVDIS(x) \
- (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
-#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
-
-#define S_FW_RI_TPTE_ADDRTYPE 26
-#define M_FW_RI_TPTE_ADDRTYPE 1
-#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
-#define G_FW_RI_TPTE_ADDRTYPE(x) \
- (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
-#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
-
-#define S_FW_RI_TPTE_MWBINDEN 25
-#define M_FW_RI_TPTE_MWBINDEN 0x1
-#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
-#define G_FW_RI_TPTE_MWBINDEN(x) \
- (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
-#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
-
-#define S_FW_RI_TPTE_PS 20
-#define M_FW_RI_TPTE_PS 0x1f
-#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
-#define G_FW_RI_TPTE_PS(x) \
- (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
-
-#define S_FW_RI_TPTE_QPID 0
-#define M_FW_RI_TPTE_QPID 0xfffff
-#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
-#define G_FW_RI_TPTE_QPID(x) \
- (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
-
-#define S_FW_RI_TPTE_NOSNOOP 30
-#define M_FW_RI_TPTE_NOSNOOP 0x1
-#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
-#define G_FW_RI_TPTE_NOSNOOP(x) \
- (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
-#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
-
-#define S_FW_RI_TPTE_PBLADDR 0
-#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
-#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
-#define G_FW_RI_TPTE_PBLADDR(x) \
- (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
-
-#define S_FW_RI_TPTE_DCA 24
-#define M_FW_RI_TPTE_DCA 0x1f
-#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
-#define G_FW_RI_TPTE_DCA(x) \
- (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
-
-#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
-#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
-#define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \
- ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
-#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
- (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
+#define FW_RI_TPTE_VALID_S 31
+#define FW_RI_TPTE_VALID_M 0x1
+#define FW_RI_TPTE_VALID_V(x) ((x) << FW_RI_TPTE_VALID_S)
+#define FW_RI_TPTE_VALID_G(x) \
+ (((x) >> FW_RI_TPTE_VALID_S) & FW_RI_TPTE_VALID_M)
+#define FW_RI_TPTE_VALID_F FW_RI_TPTE_VALID_V(1U)
+
+#define FW_RI_TPTE_STAGKEY_S 23
+#define FW_RI_TPTE_STAGKEY_M 0xff
+#define FW_RI_TPTE_STAGKEY_V(x) ((x) << FW_RI_TPTE_STAGKEY_S)
+#define FW_RI_TPTE_STAGKEY_G(x) \
+ (((x) >> FW_RI_TPTE_STAGKEY_S) & FW_RI_TPTE_STAGKEY_M)
+
+#define FW_RI_TPTE_STAGSTATE_S 22
+#define FW_RI_TPTE_STAGSTATE_M 0x1
+#define FW_RI_TPTE_STAGSTATE_V(x) ((x) << FW_RI_TPTE_STAGSTATE_S)
+#define FW_RI_TPTE_STAGSTATE_G(x) \
+ (((x) >> FW_RI_TPTE_STAGSTATE_S) & FW_RI_TPTE_STAGSTATE_M)
+#define FW_RI_TPTE_STAGSTATE_F FW_RI_TPTE_STAGSTATE_V(1U)
+
+#define FW_RI_TPTE_STAGTYPE_S 20
+#define FW_RI_TPTE_STAGTYPE_M 0x3
+#define FW_RI_TPTE_STAGTYPE_V(x) ((x) << FW_RI_TPTE_STAGTYPE_S)
+#define FW_RI_TPTE_STAGTYPE_G(x) \
+ (((x) >> FW_RI_TPTE_STAGTYPE_S) & FW_RI_TPTE_STAGTYPE_M)
+
+#define FW_RI_TPTE_PDID_S 0
+#define FW_RI_TPTE_PDID_M 0xfffff
+#define FW_RI_TPTE_PDID_V(x) ((x) << FW_RI_TPTE_PDID_S)
+#define FW_RI_TPTE_PDID_G(x) \
+ (((x) >> FW_RI_TPTE_PDID_S) & FW_RI_TPTE_PDID_M)
+
+#define FW_RI_TPTE_PERM_S 28
+#define FW_RI_TPTE_PERM_M 0xf
+#define FW_RI_TPTE_PERM_V(x) ((x) << FW_RI_TPTE_PERM_S)
+#define FW_RI_TPTE_PERM_G(x) \
+ (((x) >> FW_RI_TPTE_PERM_S) & FW_RI_TPTE_PERM_M)
+
+#define FW_RI_TPTE_REMINVDIS_S 27
+#define FW_RI_TPTE_REMINVDIS_M 0x1
+#define FW_RI_TPTE_REMINVDIS_V(x) ((x) << FW_RI_TPTE_REMINVDIS_S)
+#define FW_RI_TPTE_REMINVDIS_G(x) \
+ (((x) >> FW_RI_TPTE_REMINVDIS_S) & FW_RI_TPTE_REMINVDIS_M)
+#define FW_RI_TPTE_REMINVDIS_F FW_RI_TPTE_REMINVDIS_V(1U)
+
+#define FW_RI_TPTE_ADDRTYPE_S 26
+#define FW_RI_TPTE_ADDRTYPE_M 1
+#define FW_RI_TPTE_ADDRTYPE_V(x) ((x) << FW_RI_TPTE_ADDRTYPE_S)
+#define FW_RI_TPTE_ADDRTYPE_G(x) \
+ (((x) >> FW_RI_TPTE_ADDRTYPE_S) & FW_RI_TPTE_ADDRTYPE_M)
+#define FW_RI_TPTE_ADDRTYPE_F FW_RI_TPTE_ADDRTYPE_V(1U)
+
+#define FW_RI_TPTE_MWBINDEN_S 25
+#define FW_RI_TPTE_MWBINDEN_M 0x1
+#define FW_RI_TPTE_MWBINDEN_V(x) ((x) << FW_RI_TPTE_MWBINDEN_S)
+#define FW_RI_TPTE_MWBINDEN_G(x) \
+ (((x) >> FW_RI_TPTE_MWBINDEN_S) & FW_RI_TPTE_MWBINDEN_M)
+#define FW_RI_TPTE_MWBINDEN_F FW_RI_TPTE_MWBINDEN_V(1U)
+
+#define FW_RI_TPTE_PS_S 20
+#define FW_RI_TPTE_PS_M 0x1f
+#define FW_RI_TPTE_PS_V(x) ((x) << FW_RI_TPTE_PS_S)
+#define FW_RI_TPTE_PS_G(x) \
+ (((x) >> FW_RI_TPTE_PS_S) & FW_RI_TPTE_PS_M)
+
+#define FW_RI_TPTE_QPID_S 0
+#define FW_RI_TPTE_QPID_M 0xfffff
+#define FW_RI_TPTE_QPID_V(x) ((x) << FW_RI_TPTE_QPID_S)
+#define FW_RI_TPTE_QPID_G(x) \
+ (((x) >> FW_RI_TPTE_QPID_S) & FW_RI_TPTE_QPID_M)
+
+#define FW_RI_TPTE_NOSNOOP_S 30
+#define FW_RI_TPTE_NOSNOOP_M 0x1
+#define FW_RI_TPTE_NOSNOOP_V(x) ((x) << FW_RI_TPTE_NOSNOOP_S)
+#define FW_RI_TPTE_NOSNOOP_G(x) \
+ (((x) >> FW_RI_TPTE_NOSNOOP_S) & FW_RI_TPTE_NOSNOOP_M)
+#define FW_RI_TPTE_NOSNOOP_F FW_RI_TPTE_NOSNOOP_V(1U)
+
+#define FW_RI_TPTE_PBLADDR_S 0
+#define FW_RI_TPTE_PBLADDR_M 0x1fffffff
+#define FW_RI_TPTE_PBLADDR_V(x) ((x) << FW_RI_TPTE_PBLADDR_S)
+#define FW_RI_TPTE_PBLADDR_G(x) \
+ (((x) >> FW_RI_TPTE_PBLADDR_S) & FW_RI_TPTE_PBLADDR_M)
+
+#define FW_RI_TPTE_DCA_S 24
+#define FW_RI_TPTE_DCA_M 0x1f
+#define FW_RI_TPTE_DCA_V(x) ((x) << FW_RI_TPTE_DCA_S)
+#define FW_RI_TPTE_DCA_G(x) \
+ (((x) >> FW_RI_TPTE_DCA_S) & FW_RI_TPTE_DCA_M)
+
+#define FW_RI_TPTE_MWBCNT_PSTAG_S 0
+#define FW_RI_TPTE_MWBCNT_PSTAG_M 0xffffff
+#define FW_RI_TPTE_MWBCNT_PSTAT_V(x) \
+ ((x) << FW_RI_TPTE_MWBCNT_PSTAG_S)
+#define FW_RI_TPTE_MWBCNT_PSTAG_G(x) \
+ (((x) >> FW_RI_TPTE_MWBCNT_PSTAG_S) & FW_RI_TPTE_MWBCNT_PSTAG_M)
enum fw_ri_res_type {
FW_RI_RES_TYPE_SQ,
@@ -308,222 +308,222 @@ struct fw_ri_res_wr {
#endif
};
-#define S_FW_RI_RES_WR_NRES 0
-#define M_FW_RI_RES_WR_NRES 0xff
-#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
-#define G_FW_RI_RES_WR_NRES(x) \
- (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
-
-#define S_FW_RI_RES_WR_FETCHSZM 26
-#define M_FW_RI_RES_WR_FETCHSZM 0x1
-#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
-#define G_FW_RI_RES_WR_FETCHSZM(x) \
- (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
-#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
-
-#define S_FW_RI_RES_WR_STATUSPGNS 25
-#define M_FW_RI_RES_WR_STATUSPGNS 0x1
-#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
-#define G_FW_RI_RES_WR_STATUSPGNS(x) \
- (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
-#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
-
-#define S_FW_RI_RES_WR_STATUSPGRO 24
-#define M_FW_RI_RES_WR_STATUSPGRO 0x1
-#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
-#define G_FW_RI_RES_WR_STATUSPGRO(x) \
- (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
-#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
-
-#define S_FW_RI_RES_WR_FETCHNS 23
-#define M_FW_RI_RES_WR_FETCHNS 0x1
-#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
-#define G_FW_RI_RES_WR_FETCHNS(x) \
- (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
-#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
-
-#define S_FW_RI_RES_WR_FETCHRO 22
-#define M_FW_RI_RES_WR_FETCHRO 0x1
-#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
-#define G_FW_RI_RES_WR_FETCHRO(x) \
- (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
-#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
-
-#define S_FW_RI_RES_WR_HOSTFCMODE 20
-#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
-#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
-#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
- (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
-
-#define S_FW_RI_RES_WR_CPRIO 19
-#define M_FW_RI_RES_WR_CPRIO 0x1
-#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
-#define G_FW_RI_RES_WR_CPRIO(x) \
- (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
-#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
-
-#define S_FW_RI_RES_WR_ONCHIP 18
-#define M_FW_RI_RES_WR_ONCHIP 0x1
-#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
-#define G_FW_RI_RES_WR_ONCHIP(x) \
- (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
-#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
-
-#define S_FW_RI_RES_WR_PCIECHN 16
-#define M_FW_RI_RES_WR_PCIECHN 0x3
-#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
-#define G_FW_RI_RES_WR_PCIECHN(x) \
- (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
-
-#define S_FW_RI_RES_WR_IQID 0
-#define M_FW_RI_RES_WR_IQID 0xffff
-#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
-#define G_FW_RI_RES_WR_IQID(x) \
- (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
-
-#define S_FW_RI_RES_WR_DCAEN 31
-#define M_FW_RI_RES_WR_DCAEN 0x1
-#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
-#define G_FW_RI_RES_WR_DCAEN(x) \
- (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
-#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
-
-#define S_FW_RI_RES_WR_DCACPU 26
-#define M_FW_RI_RES_WR_DCACPU 0x1f
-#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
-#define G_FW_RI_RES_WR_DCACPU(x) \
- (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
-
-#define S_FW_RI_RES_WR_FBMIN 23
-#define M_FW_RI_RES_WR_FBMIN 0x7
-#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
-#define G_FW_RI_RES_WR_FBMIN(x) \
- (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
-
-#define S_FW_RI_RES_WR_FBMAX 20
-#define M_FW_RI_RES_WR_FBMAX 0x7
-#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
-#define G_FW_RI_RES_WR_FBMAX(x) \
- (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
-
-#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
-#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
-#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
-#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
- (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
-#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
-
-#define S_FW_RI_RES_WR_CIDXFTHRESH 16
-#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
-#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
-#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
- (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
-
-#define S_FW_RI_RES_WR_EQSIZE 0
-#define M_FW_RI_RES_WR_EQSIZE 0xffff
-#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
-#define G_FW_RI_RES_WR_EQSIZE(x) \
- (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
-
-#define S_FW_RI_RES_WR_IQANDST 15
-#define M_FW_RI_RES_WR_IQANDST 0x1
-#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
-#define G_FW_RI_RES_WR_IQANDST(x) \
- (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
-#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
-
-#define S_FW_RI_RES_WR_IQANUS 14
-#define M_FW_RI_RES_WR_IQANUS 0x1
-#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
-#define G_FW_RI_RES_WR_IQANUS(x) \
- (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
-#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
-
-#define S_FW_RI_RES_WR_IQANUD 12
-#define M_FW_RI_RES_WR_IQANUD 0x3
-#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
-#define G_FW_RI_RES_WR_IQANUD(x) \
- (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
-
-#define S_FW_RI_RES_WR_IQANDSTINDEX 0
-#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
-#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
-#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
- (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
-
-#define S_FW_RI_RES_WR_IQDROPRSS 15
-#define M_FW_RI_RES_WR_IQDROPRSS 0x1
-#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
-#define G_FW_RI_RES_WR_IQDROPRSS(x) \
- (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
-#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
-
-#define S_FW_RI_RES_WR_IQGTSMODE 14
-#define M_FW_RI_RES_WR_IQGTSMODE 0x1
-#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
-#define G_FW_RI_RES_WR_IQGTSMODE(x) \
- (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
-#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
-
-#define S_FW_RI_RES_WR_IQPCIECH 12
-#define M_FW_RI_RES_WR_IQPCIECH 0x3
-#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
-#define G_FW_RI_RES_WR_IQPCIECH(x) \
- (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
-
-#define S_FW_RI_RES_WR_IQDCAEN 11
-#define M_FW_RI_RES_WR_IQDCAEN 0x1
-#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
-#define G_FW_RI_RES_WR_IQDCAEN(x) \
- (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
-#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
-
-#define S_FW_RI_RES_WR_IQDCACPU 6
-#define M_FW_RI_RES_WR_IQDCACPU 0x1f
-#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
-#define G_FW_RI_RES_WR_IQDCACPU(x) \
- (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
-
-#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
-#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
-#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
- ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
-#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
- (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
-
-#define S_FW_RI_RES_WR_IQO 3
-#define M_FW_RI_RES_WR_IQO 0x1
-#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
-#define G_FW_RI_RES_WR_IQO(x) \
- (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
-#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
-
-#define S_FW_RI_RES_WR_IQCPRIO 2
-#define M_FW_RI_RES_WR_IQCPRIO 0x1
-#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
-#define G_FW_RI_RES_WR_IQCPRIO(x) \
- (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
-#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
-
-#define S_FW_RI_RES_WR_IQESIZE 0
-#define M_FW_RI_RES_WR_IQESIZE 0x3
-#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
-#define G_FW_RI_RES_WR_IQESIZE(x) \
- (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
-
-#define S_FW_RI_RES_WR_IQNS 31
-#define M_FW_RI_RES_WR_IQNS 0x1
-#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
-#define G_FW_RI_RES_WR_IQNS(x) \
- (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
-#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
-
-#define S_FW_RI_RES_WR_IQRO 30
-#define M_FW_RI_RES_WR_IQRO 0x1
-#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
-#define G_FW_RI_RES_WR_IQRO(x) \
- (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
-#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
+#define FW_RI_RES_WR_NRES_S 0
+#define FW_RI_RES_WR_NRES_M 0xff
+#define FW_RI_RES_WR_NRES_V(x) ((x) << FW_RI_RES_WR_NRES_S)
+#define FW_RI_RES_WR_NRES_G(x) \
+ (((x) >> FW_RI_RES_WR_NRES_S) & FW_RI_RES_WR_NRES_M)
+
+#define FW_RI_RES_WR_FETCHSZM_S 26
+#define FW_RI_RES_WR_FETCHSZM_M 0x1
+#define FW_RI_RES_WR_FETCHSZM_V(x) ((x) << FW_RI_RES_WR_FETCHSZM_S)
+#define FW_RI_RES_WR_FETCHSZM_G(x) \
+ (((x) >> FW_RI_RES_WR_FETCHSZM_S) & FW_RI_RES_WR_FETCHSZM_M)
+#define FW_RI_RES_WR_FETCHSZM_F FW_RI_RES_WR_FETCHSZM_V(1U)
+
+#define FW_RI_RES_WR_STATUSPGNS_S 25
+#define FW_RI_RES_WR_STATUSPGNS_M 0x1
+#define FW_RI_RES_WR_STATUSPGNS_V(x) ((x) << FW_RI_RES_WR_STATUSPGNS_S)
+#define FW_RI_RES_WR_STATUSPGNS_G(x) \
+ (((x) >> FW_RI_RES_WR_STATUSPGNS_S) & FW_RI_RES_WR_STATUSPGNS_M)
+#define FW_RI_RES_WR_STATUSPGNS_F FW_RI_RES_WR_STATUSPGNS_V(1U)
+
+#define FW_RI_RES_WR_STATUSPGRO_S 24
+#define FW_RI_RES_WR_STATUSPGRO_M 0x1
+#define FW_RI_RES_WR_STATUSPGRO_V(x) ((x) << FW_RI_RES_WR_STATUSPGRO_S)
+#define FW_RI_RES_WR_STATUSPGRO_G(x) \
+ (((x) >> FW_RI_RES_WR_STATUSPGRO_S) & FW_RI_RES_WR_STATUSPGRO_M)
+#define FW_RI_RES_WR_STATUSPGRO_F FW_RI_RES_WR_STATUSPGRO_V(1U)
+
+#define FW_RI_RES_WR_FETCHNS_S 23
+#define FW_RI_RES_WR_FETCHNS_M 0x1
+#define FW_RI_RES_WR_FETCHNS_V(x) ((x) << FW_RI_RES_WR_FETCHNS_S)
+#define FW_RI_RES_WR_FETCHNS_G(x) \
+ (((x) >> FW_RI_RES_WR_FETCHNS_S) & FW_RI_RES_WR_FETCHNS_M)
+#define FW_RI_RES_WR_FETCHNS_F FW_RI_RES_WR_FETCHNS_V(1U)
+
+#define FW_RI_RES_WR_FETCHRO_S 22
+#define FW_RI_RES_WR_FETCHRO_M 0x1
+#define FW_RI_RES_WR_FETCHRO_V(x) ((x) << FW_RI_RES_WR_FETCHRO_S)
+#define FW_RI_RES_WR_FETCHRO_G(x) \
+ (((x) >> FW_RI_RES_WR_FETCHRO_S) & FW_RI_RES_WR_FETCHRO_M)
+#define FW_RI_RES_WR_FETCHRO_F FW_RI_RES_WR_FETCHRO_V(1U)
+
+#define FW_RI_RES_WR_HOSTFCMODE_S 20
+#define FW_RI_RES_WR_HOSTFCMODE_M 0x3
+#define FW_RI_RES_WR_HOSTFCMODE_V(x) ((x) << FW_RI_RES_WR_HOSTFCMODE_S)
+#define FW_RI_RES_WR_HOSTFCMODE_G(x) \
+ (((x) >> FW_RI_RES_WR_HOSTFCMODE_S) & FW_RI_RES_WR_HOSTFCMODE_M)
+
+#define FW_RI_RES_WR_CPRIO_S 19
+#define FW_RI_RES_WR_CPRIO_M 0x1
+#define FW_RI_RES_WR_CPRIO_V(x) ((x) << FW_RI_RES_WR_CPRIO_S)
+#define FW_RI_RES_WR_CPRIO_G(x) \
+ (((x) >> FW_RI_RES_WR_CPRIO_S) & FW_RI_RES_WR_CPRIO_M)
+#define FW_RI_RES_WR_CPRIO_F FW_RI_RES_WR_CPRIO_V(1U)
+
+#define FW_RI_RES_WR_ONCHIP_S 18
+#define FW_RI_RES_WR_ONCHIP_M 0x1
+#define FW_RI_RES_WR_ONCHIP_V(x) ((x) << FW_RI_RES_WR_ONCHIP_S)
+#define FW_RI_RES_WR_ONCHIP_G(x) \
+ (((x) >> FW_RI_RES_WR_ONCHIP_S) & FW_RI_RES_WR_ONCHIP_M)
+#define FW_RI_RES_WR_ONCHIP_F FW_RI_RES_WR_ONCHIP_V(1U)
+
+#define FW_RI_RES_WR_PCIECHN_S 16
+#define FW_RI_RES_WR_PCIECHN_M 0x3
+#define FW_RI_RES_WR_PCIECHN_V(x) ((x) << FW_RI_RES_WR_PCIECHN_S)
+#define FW_RI_RES_WR_PCIECHN_G(x) \
+ (((x) >> FW_RI_RES_WR_PCIECHN_S) & FW_RI_RES_WR_PCIECHN_M)
+
+#define FW_RI_RES_WR_IQID_S 0
+#define FW_RI_RES_WR_IQID_M 0xffff
+#define FW_RI_RES_WR_IQID_V(x) ((x) << FW_RI_RES_WR_IQID_S)
+#define FW_RI_RES_WR_IQID_G(x) \
+ (((x) >> FW_RI_RES_WR_IQID_S) & FW_RI_RES_WR_IQID_M)
+
+#define FW_RI_RES_WR_DCAEN_S 31
+#define FW_RI_RES_WR_DCAEN_M 0x1
+#define FW_RI_RES_WR_DCAEN_V(x) ((x) << FW_RI_RES_WR_DCAEN_S)
+#define FW_RI_RES_WR_DCAEN_G(x) \
+ (((x) >> FW_RI_RES_WR_DCAEN_S) & FW_RI_RES_WR_DCAEN_M)
+#define FW_RI_RES_WR_DCAEN_F FW_RI_RES_WR_DCAEN_V(1U)
+
+#define FW_RI_RES_WR_DCACPU_S 26
+#define FW_RI_RES_WR_DCACPU_M 0x1f
+#define FW_RI_RES_WR_DCACPU_V(x) ((x) << FW_RI_RES_WR_DCACPU_S)
+#define FW_RI_RES_WR_DCACPU_G(x) \
+ (((x) >> FW_RI_RES_WR_DCACPU_S) & FW_RI_RES_WR_DCACPU_M)
+
+#define FW_RI_RES_WR_FBMIN_S 23
+#define FW_RI_RES_WR_FBMIN_M 0x7
+#define FW_RI_RES_WR_FBMIN_V(x) ((x) << FW_RI_RES_WR_FBMIN_S)
+#define FW_RI_RES_WR_FBMIN_G(x) \
+ (((x) >> FW_RI_RES_WR_FBMIN_S) & FW_RI_RES_WR_FBMIN_M)
+
+#define FW_RI_RES_WR_FBMAX_S 20
+#define FW_RI_RES_WR_FBMAX_M 0x7
+#define FW_RI_RES_WR_FBMAX_V(x) ((x) << FW_RI_RES_WR_FBMAX_S)
+#define FW_RI_RES_WR_FBMAX_G(x) \
+ (((x) >> FW_RI_RES_WR_FBMAX_S) & FW_RI_RES_WR_FBMAX_M)
+
+#define FW_RI_RES_WR_CIDXFTHRESHO_S 19
+#define FW_RI_RES_WR_CIDXFTHRESHO_M 0x1
+#define FW_RI_RES_WR_CIDXFTHRESHO_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESHO_S)
+#define FW_RI_RES_WR_CIDXFTHRESHO_G(x) \
+ (((x) >> FW_RI_RES_WR_CIDXFTHRESHO_S) & FW_RI_RES_WR_CIDXFTHRESHO_M)
+#define FW_RI_RES_WR_CIDXFTHRESHO_F FW_RI_RES_WR_CIDXFTHRESHO_V(1U)
+
+#define FW_RI_RES_WR_CIDXFTHRESH_S 16
+#define FW_RI_RES_WR_CIDXFTHRESH_M 0x7
+#define FW_RI_RES_WR_CIDXFTHRESH_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESH_S)
+#define FW_RI_RES_WR_CIDXFTHRESH_G(x) \
+ (((x) >> FW_RI_RES_WR_CIDXFTHRESH_S) & FW_RI_RES_WR_CIDXFTHRESH_M)
+
+#define FW_RI_RES_WR_EQSIZE_S 0
+#define FW_RI_RES_WR_EQSIZE_M 0xffff
+#define FW_RI_RES_WR_EQSIZE_V(x) ((x) << FW_RI_RES_WR_EQSIZE_S)
+#define FW_RI_RES_WR_EQSIZE_G(x) \
+ (((x) >> FW_RI_RES_WR_EQSIZE_S) & FW_RI_RES_WR_EQSIZE_M)
+
+#define FW_RI_RES_WR_IQANDST_S 15
+#define FW_RI_RES_WR_IQANDST_M 0x1
+#define FW_RI_RES_WR_IQANDST_V(x) ((x) << FW_RI_RES_WR_IQANDST_S)
+#define FW_RI_RES_WR_IQANDST_G(x) \
+ (((x) >> FW_RI_RES_WR_IQANDST_S) & FW_RI_RES_WR_IQANDST_M)
+#define FW_RI_RES_WR_IQANDST_F FW_RI_RES_WR_IQANDST_V(1U)
+
+#define FW_RI_RES_WR_IQANUS_S 14
+#define FW_RI_RES_WR_IQANUS_M 0x1
+#define FW_RI_RES_WR_IQANUS_V(x) ((x) << FW_RI_RES_WR_IQANUS_S)
+#define FW_RI_RES_WR_IQANUS_G(x) \
+ (((x) >> FW_RI_RES_WR_IQANUS_S) & FW_RI_RES_WR_IQANUS_M)
+#define FW_RI_RES_WR_IQANUS_F FW_RI_RES_WR_IQANUS_V(1U)
+
+#define FW_RI_RES_WR_IQANUD_S 12
+#define FW_RI_RES_WR_IQANUD_M 0x3
+#define FW_RI_RES_WR_IQANUD_V(x) ((x) << FW_RI_RES_WR_IQANUD_S)
+#define FW_RI_RES_WR_IQANUD_G(x) \
+ (((x) >> FW_RI_RES_WR_IQANUD_S) & FW_RI_RES_WR_IQANUD_M)
+
+#define FW_RI_RES_WR_IQANDSTINDEX_S 0
+#define FW_RI_RES_WR_IQANDSTINDEX_M 0xfff
+#define FW_RI_RES_WR_IQANDSTINDEX_V(x) ((x) << FW_RI_RES_WR_IQANDSTINDEX_S)
+#define FW_RI_RES_WR_IQANDSTINDEX_G(x) \
+ (((x) >> FW_RI_RES_WR_IQANDSTINDEX_S) & FW_RI_RES_WR_IQANDSTINDEX_M)
+
+#define FW_RI_RES_WR_IQDROPRSS_S 15
+#define FW_RI_RES_WR_IQDROPRSS_M 0x1
+#define FW_RI_RES_WR_IQDROPRSS_V(x) ((x) << FW_RI_RES_WR_IQDROPRSS_S)
+#define FW_RI_RES_WR_IQDROPRSS_G(x) \
+ (((x) >> FW_RI_RES_WR_IQDROPRSS_S) & FW_RI_RES_WR_IQDROPRSS_M)
+#define FW_RI_RES_WR_IQDROPRSS_F FW_RI_RES_WR_IQDROPRSS_V(1U)
+
+#define FW_RI_RES_WR_IQGTSMODE_S 14
+#define FW_RI_RES_WR_IQGTSMODE_M 0x1
+#define FW_RI_RES_WR_IQGTSMODE_V(x) ((x) << FW_RI_RES_WR_IQGTSMODE_S)
+#define FW_RI_RES_WR_IQGTSMODE_G(x) \
+ (((x) >> FW_RI_RES_WR_IQGTSMODE_S) & FW_RI_RES_WR_IQGTSMODE_M)
+#define FW_RI_RES_WR_IQGTSMODE_F FW_RI_RES_WR_IQGTSMODE_V(1U)
+
+#define FW_RI_RES_WR_IQPCIECH_S 12
+#define FW_RI_RES_WR_IQPCIECH_M 0x3
+#define FW_RI_RES_WR_IQPCIECH_V(x) ((x) << FW_RI_RES_WR_IQPCIECH_S)
+#define FW_RI_RES_WR_IQPCIECH_G(x) \
+ (((x) >> FW_RI_RES_WR_IQPCIECH_S) & FW_RI_RES_WR_IQPCIECH_M)
+
+#define FW_RI_RES_WR_IQDCAEN_S 11
+#define FW_RI_RES_WR_IQDCAEN_M 0x1
+#define FW_RI_RES_WR_IQDCAEN_V(x) ((x) << FW_RI_RES_WR_IQDCAEN_S)
+#define FW_RI_RES_WR_IQDCAEN_G(x) \
+ (((x) >> FW_RI_RES_WR_IQDCAEN_S) & FW_RI_RES_WR_IQDCAEN_M)
+#define FW_RI_RES_WR_IQDCAEN_F FW_RI_RES_WR_IQDCAEN_V(1U)
+
+#define FW_RI_RES_WR_IQDCACPU_S 6
+#define FW_RI_RES_WR_IQDCACPU_M 0x1f
+#define FW_RI_RES_WR_IQDCACPU_V(x) ((x) << FW_RI_RES_WR_IQDCACPU_S)
+#define FW_RI_RES_WR_IQDCACPU_G(x) \
+ (((x) >> FW_RI_RES_WR_IQDCACPU_S) & FW_RI_RES_WR_IQDCACPU_M)
+
+#define FW_RI_RES_WR_IQINTCNTTHRESH_S 4
+#define FW_RI_RES_WR_IQINTCNTTHRESH_M 0x3
+#define FW_RI_RES_WR_IQINTCNTTHRESH_V(x) \
+ ((x) << FW_RI_RES_WR_IQINTCNTTHRESH_S)
+#define FW_RI_RES_WR_IQINTCNTTHRESH_G(x) \
+ (((x) >> FW_RI_RES_WR_IQINTCNTTHRESH_S) & FW_RI_RES_WR_IQINTCNTTHRESH_M)
+
+#define FW_RI_RES_WR_IQO_S 3
+#define FW_RI_RES_WR_IQO_M 0x1
+#define FW_RI_RES_WR_IQO_V(x) ((x) << FW_RI_RES_WR_IQO_S)
+#define FW_RI_RES_WR_IQO_G(x) \
+ (((x) >> FW_RI_RES_WR_IQO_S) & FW_RI_RES_WR_IQO_M)
+#define FW_RI_RES_WR_IQO_F FW_RI_RES_WR_IQO_V(1U)
+
+#define FW_RI_RES_WR_IQCPRIO_S 2
+#define FW_RI_RES_WR_IQCPRIO_M 0x1
+#define FW_RI_RES_WR_IQCPRIO_V(x) ((x) << FW_RI_RES_WR_IQCPRIO_S)
+#define FW_RI_RES_WR_IQCPRIO_G(x) \
+ (((x) >> FW_RI_RES_WR_IQCPRIO_S) & FW_RI_RES_WR_IQCPRIO_M)
+#define FW_RI_RES_WR_IQCPRIO_F FW_RI_RES_WR_IQCPRIO_V(1U)
+
+#define FW_RI_RES_WR_IQESIZE_S 0
+#define FW_RI_RES_WR_IQESIZE_M 0x3
+#define FW_RI_RES_WR_IQESIZE_V(x) ((x) << FW_RI_RES_WR_IQESIZE_S)
+#define FW_RI_RES_WR_IQESIZE_G(x) \
+ (((x) >> FW_RI_RES_WR_IQESIZE_S) & FW_RI_RES_WR_IQESIZE_M)
+
+#define FW_RI_RES_WR_IQNS_S 31
+#define FW_RI_RES_WR_IQNS_M 0x1
+#define FW_RI_RES_WR_IQNS_V(x) ((x) << FW_RI_RES_WR_IQNS_S)
+#define FW_RI_RES_WR_IQNS_G(x) \
+ (((x) >> FW_RI_RES_WR_IQNS_S) & FW_RI_RES_WR_IQNS_M)
+#define FW_RI_RES_WR_IQNS_F FW_RI_RES_WR_IQNS_V(1U)
+
+#define FW_RI_RES_WR_IQRO_S 30
+#define FW_RI_RES_WR_IQRO_M 0x1
+#define FW_RI_RES_WR_IQRO_V(x) ((x) << FW_RI_RES_WR_IQRO_S)
+#define FW_RI_RES_WR_IQRO_G(x) \
+ (((x) >> FW_RI_RES_WR_IQRO_S) & FW_RI_RES_WR_IQRO_M)
+#define FW_RI_RES_WR_IQRO_F FW_RI_RES_WR_IQRO_V(1U)
struct fw_ri_rdma_write_wr {
__u8 opcode;
@@ -562,11 +562,11 @@ struct fw_ri_send_wr {
#endif
};
-#define S_FW_RI_SEND_WR_SENDOP 0
-#define M_FW_RI_SEND_WR_SENDOP 0xf
-#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
-#define G_FW_RI_SEND_WR_SENDOP(x) \
- (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
+#define FW_RI_SEND_WR_SENDOP_S 0
+#define FW_RI_SEND_WR_SENDOP_M 0xf
+#define FW_RI_SEND_WR_SENDOP_V(x) ((x) << FW_RI_SEND_WR_SENDOP_S)
+#define FW_RI_SEND_WR_SENDOP_G(x) \
+ (((x) >> FW_RI_SEND_WR_SENDOP_S) & FW_RI_SEND_WR_SENDOP_M)
struct fw_ri_rdma_read_wr {
__u8 opcode;
@@ -612,25 +612,25 @@ struct fw_ri_bind_mw_wr {
__be64 r4;
};
-#define S_FW_RI_BIND_MW_WR_QPBINDE 6
-#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
-#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
-#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
- (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
-#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
+#define FW_RI_BIND_MW_WR_QPBINDE_S 6
+#define FW_RI_BIND_MW_WR_QPBINDE_M 0x1
+#define FW_RI_BIND_MW_WR_QPBINDE_V(x) ((x) << FW_RI_BIND_MW_WR_QPBINDE_S)
+#define FW_RI_BIND_MW_WR_QPBINDE_G(x) \
+ (((x) >> FW_RI_BIND_MW_WR_QPBINDE_S) & FW_RI_BIND_MW_WR_QPBINDE_M)
+#define FW_RI_BIND_MW_WR_QPBINDE_F FW_RI_BIND_MW_WR_QPBINDE_V(1U)
-#define S_FW_RI_BIND_MW_WR_NS 5
-#define M_FW_RI_BIND_MW_WR_NS 0x1
-#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
-#define G_FW_RI_BIND_MW_WR_NS(x) \
- (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
-#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
+#define FW_RI_BIND_MW_WR_NS_S 5
+#define FW_RI_BIND_MW_WR_NS_M 0x1
+#define FW_RI_BIND_MW_WR_NS_V(x) ((x) << FW_RI_BIND_MW_WR_NS_S)
+#define FW_RI_BIND_MW_WR_NS_G(x) \
+ (((x) >> FW_RI_BIND_MW_WR_NS_S) & FW_RI_BIND_MW_WR_NS_M)
+#define FW_RI_BIND_MW_WR_NS_F FW_RI_BIND_MW_WR_NS_V(1U)
-#define S_FW_RI_BIND_MW_WR_DCACPU 0
-#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
-#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
-#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
- (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
+#define FW_RI_BIND_MW_WR_DCACPU_S 0
+#define FW_RI_BIND_MW_WR_DCACPU_M 0x1f
+#define FW_RI_BIND_MW_WR_DCACPU_V(x) ((x) << FW_RI_BIND_MW_WR_DCACPU_S)
+#define FW_RI_BIND_MW_WR_DCACPU_G(x) \
+ (((x) >> FW_RI_BIND_MW_WR_DCACPU_S) & FW_RI_BIND_MW_WR_DCACPU_M)
struct fw_ri_fr_nsmr_wr {
__u8 opcode;
@@ -649,25 +649,25 @@ struct fw_ri_fr_nsmr_wr {
__be32 va_lo_fbo;
};
-#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
-#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
-#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
-#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
- (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
-#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
+#define FW_RI_FR_NSMR_WR_QPBINDE_S 6
+#define FW_RI_FR_NSMR_WR_QPBINDE_M 0x1
+#define FW_RI_FR_NSMR_WR_QPBINDE_V(x) ((x) << FW_RI_FR_NSMR_WR_QPBINDE_S)
+#define FW_RI_FR_NSMR_WR_QPBINDE_G(x) \
+ (((x) >> FW_RI_FR_NSMR_WR_QPBINDE_S) & FW_RI_FR_NSMR_WR_QPBINDE_M)
+#define FW_RI_FR_NSMR_WR_QPBINDE_F FW_RI_FR_NSMR_WR_QPBINDE_V(1U)
-#define S_FW_RI_FR_NSMR_WR_NS 5
-#define M_FW_RI_FR_NSMR_WR_NS 0x1
-#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
-#define G_FW_RI_FR_NSMR_WR_NS(x) \
- (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
-#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
+#define FW_RI_FR_NSMR_WR_NS_S 5
+#define FW_RI_FR_NSMR_WR_NS_M 0x1
+#define FW_RI_FR_NSMR_WR_NS_V(x) ((x) << FW_RI_FR_NSMR_WR_NS_S)
+#define FW_RI_FR_NSMR_WR_NS_G(x) \
+ (((x) >> FW_RI_FR_NSMR_WR_NS_S) & FW_RI_FR_NSMR_WR_NS_M)
+#define FW_RI_FR_NSMR_WR_NS_F FW_RI_FR_NSMR_WR_NS_V(1U)
-#define S_FW_RI_FR_NSMR_WR_DCACPU 0
-#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
-#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
-#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
- (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
+#define FW_RI_FR_NSMR_WR_DCACPU_S 0
+#define FW_RI_FR_NSMR_WR_DCACPU_M 0x1f
+#define FW_RI_FR_NSMR_WR_DCACPU_V(x) ((x) << FW_RI_FR_NSMR_WR_DCACPU_S)
+#define FW_RI_FR_NSMR_WR_DCACPU_G(x) \
+ (((x) >> FW_RI_FR_NSMR_WR_DCACPU_S) & FW_RI_FR_NSMR_WR_DCACPU_M)
struct fw_ri_inv_lstag_wr {
__u8 opcode;
@@ -740,18 +740,18 @@ struct fw_ri_wr {
} u;
};
-#define S_FW_RI_WR_MPAREQBIT 7
-#define M_FW_RI_WR_MPAREQBIT 0x1
-#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
-#define G_FW_RI_WR_MPAREQBIT(x) \
- (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
-#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
+#define FW_RI_WR_MPAREQBIT_S 7
+#define FW_RI_WR_MPAREQBIT_M 0x1
+#define FW_RI_WR_MPAREQBIT_V(x) ((x) << FW_RI_WR_MPAREQBIT_S)
+#define FW_RI_WR_MPAREQBIT_G(x) \
+ (((x) >> FW_RI_WR_MPAREQBIT_S) & FW_RI_WR_MPAREQBIT_M)
+#define FW_RI_WR_MPAREQBIT_F FW_RI_WR_MPAREQBIT_V(1U)
-#define S_FW_RI_WR_P2PTYPE 0
-#define M_FW_RI_WR_P2PTYPE 0xf
-#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
-#define G_FW_RI_WR_P2PTYPE(x) \
- (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+#define FW_RI_WR_P2PTYPE_S 0
+#define FW_RI_WR_P2PTYPE_M 0xf
+#define FW_RI_WR_P2PTYPE_V(x) ((x) << FW_RI_WR_P2PTYPE_S)
+#define FW_RI_WR_P2PTYPE_G(x) \
+ (((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M)
struct tcp_options {
__be16 mss;
@@ -783,58 +783,58 @@ struct cpl_pass_accept_req {
};
/* cpl_pass_accept_req.hdr_len fields */
-#define S_SYN_RX_CHAN 0
-#define M_SYN_RX_CHAN 0xF
-#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
-#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
-
-#define S_TCP_HDR_LEN 10
-#define M_TCP_HDR_LEN 0x3F
-#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
-#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
-
-#define S_IP_HDR_LEN 16
-#define M_IP_HDR_LEN 0x3FF
-#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
-#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
-
-#define S_ETH_HDR_LEN 26
-#define M_ETH_HDR_LEN 0x1F
-#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
-#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
+#define SYN_RX_CHAN_S 0
+#define SYN_RX_CHAN_M 0xF
+#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
+#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
+
+#define TCP_HDR_LEN_S 10
+#define TCP_HDR_LEN_M 0x3F
+#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
+#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
+
+#define IP_HDR_LEN_S 16
+#define IP_HDR_LEN_M 0x3FF
+#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
+#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
+
+#define ETH_HDR_LEN_S 26
+#define ETH_HDR_LEN_M 0x1F
+#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
+#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
/* cpl_pass_accept_req.l2info fields */
-#define S_SYN_MAC_IDX 0
-#define M_SYN_MAC_IDX 0x1FF
-#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
-#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
+#define SYN_MAC_IDX_S 0
+#define SYN_MAC_IDX_M 0x1FF
+#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
+#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
-#define S_SYN_XACT_MATCH 9
-#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
-#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
+#define SYN_XACT_MATCH_S 9
+#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
+#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
-#define S_SYN_INTF 12
-#define M_SYN_INTF 0xF
-#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
-#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+#define SYN_INTF_S 12
+#define SYN_INTF_M 0xF
+#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
+#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
struct ulptx_idata {
__be32 cmd_more;
__be32 len;
};
-#define S_ULPTX_NSGE 0
-#define M_ULPTX_NSGE 0xFFFF
-#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+#define ULPTX_NSGE_S 0
+#define ULPTX_NSGE_M 0xFFFF
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
-#define S_RX_DACK_MODE 29
-#define M_RX_DACK_MODE 0x3
-#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
-#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+#define RX_DACK_MODE_S 29
+#define RX_DACK_MODE_M 0x3
+#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
+#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
-#define S_RX_DACK_CHANGE 31
-#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
-#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
+#define RX_DACK_CHANGE_S 31
+#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
+#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
enum { /* TCP congestion control algorithms */
CONG_ALG_RENO,
@@ -843,10 +843,10 @@ enum { /* TCP congestion control algorithms */
CONG_ALG_HIGHSPEED
};
-#define S_CONG_CNTRL 14
-#define M_CONG_CNTRL 0x3
-#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
-#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+#define CONG_CNTRL_S 14
+#define CONG_CNTRL_M 0x3
+#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
+#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
#define CONG_CNTRL_VALID (1 << 18)
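
The hunks above only rename the cxgb4 bitfield helpers from the S_/M_/V_/G_/F_ prefix style to the _S/_M/_V/_G/_F suffix style; the shift/mask arithmetic is unchanged. A minimal self-contained sketch of how the four accessor forms compose (FIELD is a hypothetical stand-in for any FW_RI_RES_WR_* field, not part of the patch):

#include <stdio.h>
#include <stdint.h>

#define FIELD_S    12                           /* bit offset of the field */
#define FIELD_M    0x3                          /* mask applied after the shift */
#define FIELD_V(x) ((x) << FIELD_S)             /* insert a value */
#define FIELD_G(x) (((x) >> FIELD_S) & FIELD_M) /* extract a value */
#define FIELD_F    FIELD_V(1U)                  /* flag form, 1-bit fields only */

int main(void)
{
        uint32_t word = 0;

        word |= FIELD_V(2);            /* pack value 2 into bits 13:12 */
        printf("%u\n", FIELD_G(word)); /* prints 2 */
        return 0;
}
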
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 2d8c3397774..f50a546224a 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -36,6 +36,7 @@
#include <linux/slab.h>
#include <linux/inet.h>
#include <linux/string.h>
+#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 0eb141c4141..a31e031afd8 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
continue;
slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
- if (slave_id >= dev->dev->num_vfs + 1)
+ if (slave_id >= dev->dev->persist->num_vfs + 1)
return;
tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
form_cache_ag = get_cached_alias_guid(dev, port_num,
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a3b70f6c403..543ecdd8667 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -188,6 +188,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
spin_lock_init(&cq->lock);
cq->resize_buf = NULL;
cq->resize_umem = NULL;
+ INIT_LIST_HEAD(&cq->send_qp_list);
+ INIT_LIST_HEAD(&cq->recv_qp_list);
if (context) {
struct mlx4_ib_create_cq ucmd;
@@ -594,6 +596,55 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
return 0;
}
+static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
+ struct ib_wc *wc, int *npolled, int is_send)
+{
+ struct mlx4_ib_wq *wq;
+ unsigned cur;
+ int i;
+
+ wq = is_send ? &qp->sq : &qp->rq;
+ cur = wq->head - wq->tail;
+
+ if (cur == 0)
+ return;
+
+ for (i = 0; i < cur && *npolled < num_entries; i++) {
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
+ wq->tail++;
+ (*npolled)++;
+ wc->qp = &qp->ibqp;
+ wc++;
+ }
+}
+
+static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx4_ib_qp *qp;
+
+ *npolled = 0;
+ /* Find uncompleted WQEs belonging to that cq and return
+ * simulated FLUSH_ERR completions
+ */
+ list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
+ mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
+ if (*npolled >= num_entries)
+ goto out;
+ }
+
+ list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
+ mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
+ if (*npolled >= num_entries)
+ goto out;
+ }
+
+out:
+ return;
+}
+
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
struct mlx4_ib_qp **cur_qp,
struct ib_wc *wc)
@@ -836,8 +887,13 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
unsigned long flags;
int npolled;
int err = 0;
+ struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
spin_lock_irqsave(&cq->lock, flags);
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+ goto out;
+ }
for (npolled = 0; npolled < num_entries; ++npolled) {
err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
@@ -847,6 +903,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
mlx4_cq_set_ci(&cq->mcq);
+out:
spin_unlock_irqrestore(&cq->lock, flags);
if (err == 0 || err == -EAGAIN)
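
When mdev->dev->persist->state carries MLX4_DEVICE_STATE_INTERNAL_ERROR, the poll path above stops touching hardware and drains the software rings as IB_WC_WR_FLUSH_ERR completions instead. A user-space sketch of the same head/tail ring walk (wq_flush, WQE_CNT and the wrid array are hypothetical; the index math mirrors mlx4_ib_qp_sw_comp):

#include <stdio.h>

#define WQE_CNT 8                       /* ring size, a power of two */

struct wq {
        unsigned int head, tail;
        unsigned long wrid[WQE_CNT];
};

/* Drain up to num_entries pending WQEs, reporting each as flushed. */
static int wq_flush(struct wq *wq, int num_entries)
{
        unsigned int cur = wq->head - wq->tail; /* unsigned, wraps correctly */
        int npolled = 0;

        while (cur-- && npolled < num_entries) {
                printf("wr_id %lu: FLUSH_ERR\n",
                       wq->wrid[wq->tail & (WQE_CNT - 1)]);
                wq->tail++;
                npolled++;
        }
        return npolled;
}

int main(void)
{
        struct wq wq = { .head = 3, .tail = 0, .wrid = { 10, 11, 12 } };

        wq_flush(&wq, 16);              /* prints wr_ids 10, 11, 12 */
        return 0;
}
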
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 82a7dd87089..c7619716c31 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1951,7 +1951,8 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
ctx->ib_dev = &dev->ib_dev;
for (i = 0;
- i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
+ i < min(dev->dev->caps.sqp_demux,
+ (u16)(dev->dev->persist->num_vfs + 1));
i++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev->dev, i);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 9117b7a2d5f..eb8e215f161 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -198,7 +198,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
- props->vendor_part_id = dev->dev->pdev->device;
+ props->vendor_part_id = dev->dev->persist->pdev->device;
props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
@@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
enum ib_mtu tmp;
struct mlx4_cmd_mailbox *mailbox;
int err = 0;
+ int is_bonded = mlx4_is_bonded(mdev->dev);
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->state = IB_PORT_DOWN;
props->phys_state = state_to_phys_state(props->state);
props->active_mtu = IB_MTU_256;
+ if (is_bonded)
+ rtnl_lock(); /* required to get upper dev */
spin_lock_bh(&iboe->lock);
ndev = iboe->netdevs[port - 1];
+ if (ndev && is_bonded)
+ ndev = netdev_master_upper_dev_get(ndev);
if (!ndev)
goto out_unlock;
@@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->phys_state = state_to_phys_state(props->state);
out_unlock:
spin_unlock_bh(&iboe->lock);
+ if (is_bonded)
+ rtnl_unlock();
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
@@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
struct mlx4_ib_steering {
struct list_head list;
- u64 reg_id;
+ struct mlx4_flow_reg_id reg_id;
union ib_gid gid;
};
@@ -1135,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
{
- int err = 0, i = 0;
+ int err = 0, i = 0, j = 0;
struct mlx4_ib_flow *mflow;
enum mlx4_net_trans_promisc_mode type[2];
+ struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
+ int is_bonded = mlx4_is_bonded(dev);
memset(type, 0, sizeof(type));
@@ -1172,26 +1181,58 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
while (i < ARRAY_SIZE(type) && type[i]) {
err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
- &mflow->reg_id[i]);
+ &mflow->reg_id[i].id);
if (err)
goto err_create_flow;
i++;
+ if (is_bonded) {
+ /* Application always sees one port so the mirror rule
+ * must be on port #2
+ */
+ flow_attr->port = 2;
+ err = __mlx4_ib_create_flow(qp, flow_attr,
+ domain, type[j],
+ &mflow->reg_id[j].mirror);
+ flow_attr->port = 1;
+ if (err)
+ goto err_create_flow;
+ j++;
+ }
+
}
if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+ err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+ &mflow->reg_id[i].id);
if (err)
goto err_create_flow;
i++;
+ if (is_bonded) {
+ flow_attr->port = 2;
+ err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+ &mflow->reg_id[j].mirror);
+ flow_attr->port = 1;
+ if (err)
+ goto err_create_flow;
+ j++;
+ }
+ /* the mirror rule for the bonded case was created above */
}
return &mflow->ibflow;
err_create_flow:
while (i) {
- (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+ (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+ mflow->reg_id[i].id);
i--;
}
+
+ while (j) {
+ (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+ mflow->reg_id[j].mirror);
+ j--;
+ }
err_free:
kfree(mflow);
return ERR_PTR(err);
@@ -1204,10 +1245,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
struct mlx4_ib_flow *mflow = to_mflow(flow_id);
- while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
- err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
+ while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
+ err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
if (err)
ret = err;
+ if (mflow->reg_id[i].mirror) {
+ err = __mlx4_ib_destroy_flow(mdev->dev,
+ mflow->reg_id[i].mirror);
+ if (err)
+ ret = err;
+ }
i++;
}
@@ -1219,11 +1266,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ struct mlx4_dev *dev = mdev->dev;
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
- u64 reg_id;
struct mlx4_ib_steering *ib_steering = NULL;
enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+ struct mlx4_flow_reg_id reg_id;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1235,10 +1283,21 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
!!(mqp->flags &
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
- prot, &reg_id);
+ prot, &reg_id.id);
if (err)
goto err_malloc;
+ reg_id.mirror = 0;
+ if (mlx4_is_bonded(dev)) {
+ err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
+ (mqp->port == 1) ? 2 : 1,
+ !!(mqp->flags &
+ MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+ prot, &reg_id.mirror);
+ if (err)
+ goto err_add;
+ }
+
err = add_gid_entry(ibqp, gid);
if (err)
goto err_add;
@@ -1254,7 +1313,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err_add:
mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
- prot, reg_id);
+ prot, reg_id.id);
+ if (reg_id.mirror)
+ mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+ prot, reg_id.mirror);
err_malloc:
kfree(ib_steering);
@@ -1281,10 +1343,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ struct mlx4_dev *dev = mdev->dev;
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
struct net_device *ndev;
struct mlx4_ib_gid_entry *ge;
- u64 reg_id = 0;
+ struct mlx4_flow_reg_id reg_id = {0, 0};
+
enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
@@ -1309,10 +1373,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
}
err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
- prot, reg_id);
+ prot, reg_id.id);
if (err)
return err;
+ if (mlx4_is_bonded(dev)) {
+ err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+ prot, reg_id.mirror);
+ if (err)
+ return err;
+ }
+
mutex_lock(&mqp->mutex);
ge = find_gid_entry(mqp, gid->raw);
if (ge) {
@@ -1376,7 +1447,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
- return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
+ return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -1440,6 +1511,7 @@ static void update_gids_task(struct work_struct *work)
union ib_gid *gids;
int err;
struct mlx4_dev *dev = gw->dev->dev;
+ int is_bonded = mlx4_is_bonded(dev);
if (!gw->dev->ib_active)
return;
@@ -1459,7 +1531,10 @@ static void update_gids_task(struct work_struct *work)
if (err)
pr_warn("set port command failed\n");
else
- mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
+ if ((gw->port == 1) || !is_bonded)
+ mlx4_ib_dispatch_event(gw->dev,
+ is_bonded ? 1 : gw->port,
+ IB_EVENT_GID_CHANGE);
mlx4_free_cmd_mailbox(dev, mailbox);
kfree(gw);
@@ -1875,7 +1950,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
* don't want the bond IP based gids in the table since
* flows that select port by gid may get the down port.
*/
- if (port_state == IB_PORT_DOWN) {
+ if (port_state == IB_PORT_DOWN &&
+ !mlx4_is_bonded(ibdev->dev)) {
reset_gid_table(ibdev, port);
mlx4_ib_set_default_gid(ibdev,
curr_netdev,
@@ -1938,7 +2014,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
int i;
if (mlx4_is_master(ibdev->dev)) {
- for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
+ for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
+ ++slave) {
for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
for (i = 0;
i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
@@ -1995,7 +2072,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
for (j = 0; j < eq_per_port; j++) {
snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
- i, j, dev->pdev->bus->name);
+ i, j, dev->persist->pdev->bus->name);
/* Set IRQ for specific name (per ring) */
if (mlx4_assign_eq(dev, name, NULL,
&ibdev->eq_table[eq])) {
@@ -2046,6 +2123,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
int err;
struct mlx4_ib_iboe *iboe;
int ib_num_ports = 0;
+ int num_req_counters;
pr_info_once("%s", mlx4_ib_version);
@@ -2059,7 +2137,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
if (!ibdev) {
- dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
+ dev_err(&dev->persist->pdev->dev,
+ "Device struct alloc failed\n");
return NULL;
}
@@ -2078,15 +2157,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
ibdev->dev = dev;
+ ibdev->bond_next_port = 0;
strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
ibdev->ib_dev.owner = THIS_MODULE;
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
ibdev->num_ports = num_ports;
- ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
+ ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
+ 1 : ibdev->num_ports;
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
- ibdev->ib_dev.dma_device = &dev->pdev->dev;
+ ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
if (dev->caps.userspace_caps)
ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
@@ -2205,7 +2286,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (init_node_data(ibdev))
goto err_map;
- for (i = 0; i < ibdev->num_ports; ++i) {
+ num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
+ for (i = 0; i < num_req_counters; ++i) {
mutex_init(&ibdev->qp1_proxy_lock[i]);
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
@@ -2216,12 +2298,18 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->counters[i] = -1;
}
}
+ if (mlx4_is_bonded(dev))
+ for (i = 1; i < ibdev->num_ports; ++i)
+ ibdev->counters[i] = ibdev->counters[0];
+
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
ib_num_ports++;
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
+ INIT_LIST_HEAD(&ibdev->qp_list);
+ spin_lock_init(&ibdev->reset_flow_resource_lock);
if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
ib_num_ports) {
@@ -2237,7 +2325,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
sizeof(long),
GFP_KERNEL);
if (!ibdev->ib_uc_qpns_bitmap) {
- dev_err(&dev->pdev->dev, "bit map alloc failed\n");
+ dev_err(&dev->persist->pdev->dev,
+ "bit map alloc failed\n");
goto err_steer_qp_release;
}
@@ -2535,6 +2624,99 @@ out:
return;
}
+static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
+{
+ struct mlx4_ib_qp *mqp;
+ unsigned long flags_qp;
+ unsigned long flags_cq;
+ struct mlx4_ib_cq *send_mcq, *recv_mcq;
+ struct list_head cq_notify_list;
+ struct mlx4_cq *mcq;
+ unsigned long flags;
+
+ pr_warn("mlx4_ib_handle_catas_error was started\n");
+ INIT_LIST_HEAD(&cq_notify_list);
+
+ /* Go over the qp list residing on that ibdev, syncing with qp create/destroy. */
+ spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+
+ list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+ spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+ if (mqp->sq.tail != mqp->sq.head) {
+ send_mcq = to_mcq(mqp->ibqp.send_cq);
+ spin_lock_irqsave(&send_mcq->lock, flags_cq);
+ if (send_mcq->mcq.comp &&
+ mqp->ibqp.send_cq->comp_handler) {
+ if (!send_mcq->mcq.reset_notify_added) {
+ send_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&send_mcq->mcq.reset_notify,
+ &cq_notify_list);
+ }
+ }
+ spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+ }
+ spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+ /* Now, handle the QP's receive queue */
+ spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+ /* no handling is needed for SRQ */
+ if (!mqp->ibqp.srq) {
+ if (mqp->rq.tail != mqp->rq.head) {
+ recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+ spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+ if (recv_mcq->mcq.comp &&
+ mqp->ibqp.recv_cq->comp_handler) {
+ if (!recv_mcq->mcq.reset_notify_added) {
+ recv_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&recv_mcq->mcq.reset_notify,
+ &cq_notify_list);
+ }
+ }
+ spin_unlock_irqrestore(&recv_mcq->lock,
+ flags_cq);
+ }
+ }
+ spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+ }
+
+ list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
+ mcq->comp(mcq);
+ }
+ spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+ pr_warn("mlx4_ib_handle_catas_error ended\n");
+}
+
+static void handle_bonded_port_state_event(struct work_struct *work)
+{
+ struct ib_event_work *ew =
+ container_of(work, struct ib_event_work, work);
+ struct mlx4_ib_dev *ibdev = ew->ib_dev;
+ enum ib_port_state bonded_port_state = IB_PORT_NOP;
+ int i;
+ struct ib_event ibev;
+
+ kfree(ew);
+ spin_lock_bh(&ibdev->iboe.lock);
+ for (i = 0; i < MLX4_MAX_PORTS; ++i) {
+ struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+ enum ib_port_state curr_port_state;
+
+ if (!curr_netdev)
+ continue;
+
+ curr_port_state =
+ (netif_running(curr_netdev) &&
+ netif_carrier_ok(curr_netdev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+
+ bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
+ curr_port_state : IB_PORT_ACTIVE;
+ }
+ spin_unlock_bh(&ibdev->iboe.lock);
+
+ ibev.device = &ibdev->ib_dev;
+ ibev.element.port_num = 1;
+ ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+ ib_dispatch_event(&ibev);
+}
+
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
enum mlx4_dev_event event, unsigned long param)
{
@@ -2544,6 +2726,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
struct ib_event_work *ew;
int p = 0;
+ if (mlx4_is_bonded(dev) &&
+ ((event == MLX4_DEV_EVENT_PORT_UP) ||
+ (event == MLX4_DEV_EVENT_PORT_DOWN))) {
+ ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+ if (!ew)
+ return;
+ INIT_WORK(&ew->work, handle_bonded_port_state_event);
+ ew->ib_dev = ibdev;
+ queue_work(wq, &ew->work);
+ return;
+ }
+
if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
eqe = (struct mlx4_eqe *)param;
else
@@ -2570,6 +2764,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
+ mlx4_ib_handle_catas_error(ibdev);
break;
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
@@ -2604,7 +2799,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
}
ibev.device = ibdev_ptr;
- ibev.element.port_num = (u8) p;
+ ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
ib_dispatch_event(&ibev);
}
@@ -2613,7 +2808,8 @@ static struct mlx4_interface mlx4_ib_interface = {
.add = mlx4_ib_add,
.remove = mlx4_ib_remove,
.event = mlx4_ib_event,
- .protocol = MLX4_PROT_IB_IPV6
+ .protocol = MLX4_PROT_IB_IPV6,
+ .flags = MLX4_INTFF_BONDING
};
static int __init mlx4_ib_init(void)
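
A theme of the main.c changes above: with MLX4_INTFF_BONDING active the application sees a single port, so every flow or multicast attachment made on the visible port is mirrored on the other physical port, and both handles travel together in struct mlx4_flow_reg_id so teardown can undo the pair. A hedged user-space sketch of that pairing (attach, detach and the handle counter are hypothetical):

#include <stdio.h>

struct flow_reg_id {
        unsigned long id;       /* rule on the port the application sees */
        unsigned long mirror;   /* shadow rule on the other physical port */
};

static unsigned long next_handle;

static unsigned long attach(int port)
{
        printf("attach on port %d\n", port);
        return ++next_handle;
}

static void detach(unsigned long handle)
{
        printf("detach handle %lu\n", handle);
}

int main(void)
{
        int is_bonded = 1;
        struct flow_reg_id reg = { .id = attach(1), .mirror = 0 };

        if (is_bonded)                  /* mirror on the other port */
                reg.mirror = attach(2);

        detach(reg.id);                 /* teardown must undo both */
        if (reg.mirror)
                detach(reg.mirror);
        return 0;
}
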
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6eb743f65f6..f829fd935b7 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -110,6 +110,9 @@ struct mlx4_ib_cq {
struct mutex resize_mutex;
struct ib_umem *umem;
struct ib_umem *resize_umem;
+ /* Lists of the QPs this CQ serves. */
+ struct list_head send_qp_list;
+ struct list_head recv_qp_list;
};
struct mlx4_ib_mr {
@@ -134,10 +137,17 @@ struct mlx4_ib_fmr {
struct mlx4_fmr mfmr;
};
+#define MAX_REGS_PER_FLOW 2
+
+struct mlx4_flow_reg_id {
+ u64 id;
+ u64 mirror;
+};
+
struct mlx4_ib_flow {
struct ib_flow ibflow;
/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
- u64 reg_id[2];
+ struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
};
struct mlx4_ib_wq {
@@ -293,6 +303,9 @@ struct mlx4_ib_qp {
struct mlx4_roce_smac_vlan_info pri;
struct mlx4_roce_smac_vlan_info alt;
u64 reg_id;
+ struct list_head qps_list;
+ struct list_head cq_recv_list;
+ struct list_head cq_send_list;
};
struct mlx4_ib_srq {
@@ -527,6 +540,10 @@ struct mlx4_ib_dev {
struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
/* lock when destroying qp1_proxy and getting netdev events */
struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
+ u8 bond_next_port;
+ /* protect resources needed as part of reset flow */
+ spinlock_t reset_flow_resource_lock;
+ struct list_head qp_list;
};
struct ib_event_work {
@@ -622,6 +639,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
return container_of(ibah, struct mlx4_ib_ah, ibah);
}
+static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
+{
+ dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
+
+ return dev->bond_next_port + 1;
+}
+
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
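
mlx4_ib_bond_next_port above hands out the physical ports of a bond in round-robin order when a QP moves to INIT, while the consumer keeps addressing port 1. The same arithmetic in a stand-alone check (NUM_PORTS and next_port are hypothetical stand-ins):

#include <stdio.h>

#define NUM_PORTS 2

static unsigned char bond_next_port;

static unsigned char next_port(void)
{
        bond_next_port = (bond_next_port + 1) % NUM_PORTS;
        return bond_next_port + 1;      /* ports are 1-based */
}

int main(void)
{
        int i;

        for (i = 0; i < 4; i++)
                printf("%u\n", next_port());    /* prints 2 1 2 1 */
        return 0;
}
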
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index c36ccbd9a64..e0d271782d0 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -401,7 +401,8 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
if (!mfrpl->ibfrpl.page_list)
goto err_free;
- mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
+ mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->
+ pdev->dev,
size, &mfrpl->map,
GFP_KERNEL);
if (!mfrpl->mapped_page_list)
@@ -423,7 +424,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
int size = page_list->max_page_list_len * sizeof (u64);
- dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
+ dma_free_coherent(&dev->dev->persist->pdev->dev, size,
+ mfrpl->mapped_page_list,
mfrpl->map);
kfree(mfrpl->ibfrpl.page_list);
kfree(mfrpl);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cf000b7ad64..dfc6ca128a7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -40,11 +40,17 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
+#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
#include "user.h"
+static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
+ struct mlx4_ib_cq *recv_cq);
+static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
+ struct mlx4_ib_cq *recv_cq);
+
enum {
MLX4_IB_ACK_REQ_FREQ = 8,
};
@@ -93,17 +99,6 @@ enum {
#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
static const __be32 mlx4_ib_opcode[] = {
[IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
@@ -628,6 +623,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct mlx4_ib_sqp *sqp;
struct mlx4_ib_qp *qp;
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
+ struct mlx4_ib_cq *mcq;
+ unsigned long flags;
/* When tunneling special qps, we use a plain UD qp */
if (sqpn) {
@@ -838,6 +835,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
qp->mqp.event = mlx4_ib_qp_event;
if (!*caller_qp)
*caller_qp = qp;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ /* Put the QP on the device-wide list; the reset flow
+ * needs this access later
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Put the QP on its CQs' lists; the reset flow
+ * needs this access later
+ */
+ mcq = to_mcq(init_attr->send_cq);
+ list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+ mcq = to_mcq(init_attr->recv_cq);
+ list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+ mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
return 0;
err_qpn:
@@ -896,13 +911,13 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
if (send_cq == recv_cq) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
}
}
@@ -912,13 +927,13 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re
{
if (send_cq == recv_cq) {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
}
}
@@ -963,6 +978,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
int is_user)
{
struct mlx4_ib_cq *send_cq, *recv_cq;
+ unsigned long flags;
if (qp->state != IB_QPS_RESET) {
if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
@@ -994,8 +1010,13 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
get_cqs(qp, &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx4_ib_lock_cqs(send_cq, recv_cq);
+ /* del from lists under both locks above to protect reset flow paths */
+ list_del(&qp->qps_list);
+ list_del(&qp->cq_send_list);
+ list_del(&qp->cq_recv_list);
if (!is_user) {
__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
@@ -1006,6 +1027,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
mlx4_qp_remove(dev->dev, &qp->mqp);
mlx4_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
mlx4_qp_free(dev->dev, &qp->mqp);
@@ -1915,6 +1937,22 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
+ if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
+ if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
+ if ((ibqp->qp_type == IB_QPT_RC) ||
+ (ibqp->qp_type == IB_QPT_UD) ||
+ (ibqp->qp_type == IB_QPT_UC) ||
+ (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
+ (ibqp->qp_type == IB_QPT_XRC_INI)) {
+ attr->port_num = mlx4_ib_bond_next_port(dev);
+ }
+ } else {
+ /* no sense in changing port_num
+ * when ports are bonded */
+ attr_mask &= ~IB_QP_PORT;
+ }
+ }
+
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 || attr->port_num > dev->num_ports)) {
pr_debug("qpn 0x%x: invalid port number (%d) specified "
@@ -1965,6 +2003,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+ if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
+ attr->port_num = 1;
+
out:
mutex_unlock(&qp->mutex);
return err;
@@ -2609,8 +2650,15 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
__be32 uninitialized_var(lso_hdr_sz);
__be32 blh;
int i;
+ struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
spin_lock_irqsave(&qp->sq.lock, flags);
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
ind = qp->sq_next_wqe;
@@ -2908,10 +2956,18 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
int ind;
int max_gs;
int i;
+ struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
max_gs = qp->rq.max_gs;
spin_lock_irqsave(&qp->rq.lock, flags);
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
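
mlx4_ib_lock_cqs/unlock_cqs above drop their _irq variants because every caller now wraps them in reset_flow_resource_lock taken with irqsave; the pair itself is still acquired in ascending cqn order, so two paths locking the same send/recv pair cannot ABBA-deadlock. A minimal pthread sketch of that ordering rule (struct cq, lock_pair and unlock_pair are hypothetical):

#include <stdio.h>
#include <pthread.h>

struct cq {
        int cqn;
        pthread_mutex_t lock;
};

/* Always acquire the lower-numbered CQ first. */
static void lock_pair(struct cq *a, struct cq *b)
{
        if (a == b) {
                pthread_mutex_lock(&a->lock);
        } else if (a->cqn < b->cqn) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
}

static void unlock_pair(struct cq *a, struct cq *b)
{
        pthread_mutex_unlock(&a->lock);
        if (a != b)
                pthread_mutex_unlock(&b->lock);
}

int main(void)
{
        struct cq send_cq = { 1, PTHREAD_MUTEX_INITIALIZER };
        struct cq recv_cq = { 2, PTHREAD_MUTEX_INITIALIZER };

        /* either argument order acquires cqn 1 before cqn 2 */
        lock_pair(&recv_cq, &send_cq);
        unlock_pair(&recv_cq, &send_cq);
        printf("ordered by cqn, no ABBA deadlock\n");
        return 0;
}
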
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 62d9285300a..dce5dfe3a70 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -316,8 +316,15 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
int err = 0;
int nreq;
int i;
+ struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);
spin_lock_irqsave(&srq->lock, flags);
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
@@ -362,6 +369,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
*srq->db.db = cpu_to_be32(srq->wqe_ctr);
}
+out:
spin_unlock_irqrestore(&srq->lock, flags);
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index cb4c66e723b..d10c2b8a5da 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
char base_name[9];
/* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
- strlcpy(name, pci_name(dev->dev->pdev), max);
+ strlcpy(name, pci_name(dev->dev->persist->pdev), max);
strncpy(base_name, name, 8); /*till xxxx:yy:*/
base_name[8] = '\0';
/* with no ARI only 3 last bits are used so when the fn is higher than 8
@@ -792,7 +792,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device)
if (!mlx4_is_master(device->dev))
return 0;
- for (i = 0; i <= device->dev->num_vfs; ++i)
+ for (i = 0; i <= device->dev->persist->num_vfs; ++i)
register_one_pkey_tree(device, i);
return 0;
@@ -807,7 +807,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
if (!mlx4_is_master(device->dev))
return;
- for (slave = device->dev->num_vfs; slave >= 0; --slave) {
+ for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
list_for_each_entry_safe(p, t,
&device->pkeys.pkey_port_list[slave],
entry) {
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index b56e4c5593e..611a9fdf2f3 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
for (k = 0; k < len; k++) {
if (!(i & mask)) {
tmp = (unsigned long)pfn;
- m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+ m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
skip = 1 << m;
mask = skip - 1;
base = pfn;
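
The min() to min_t() switch above is a type fix: m is an int while find_first_bit() returns an unsigned long, and the kernel's min() refuses mixed-type comparisons at compile time. A user-space approximation of what min_t does (this macro is a simplified stand-in using a GCC statement expression, not the kernel's exact definition):

#include <stdio.h>

#define min_t(type, x, y) ({            \
        type __x = (x);                 \
        type __y = (y);                 \
        __x < __y ? __x : __y; })

int main(void)
{
        int m = 5;
        unsigned long first_bit = 3UL;

        /* both operands are cast to unsigned long before comparing */
        printf("%lu\n", min_t(unsigned long, m, first_bit)); /* prints 3 */
        return 0;
}
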
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 49eb5111d2c..70acda91eb2 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -373,11 +373,11 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
- netdev->name, vlan_tx_tag_get(skb));
+ netdev->name, skb_vlan_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
- wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+ wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
} else
wqe_misc = 0;
@@ -576,11 +576,12 @@ tso_sq_no_longer_full:
wqe_fragment_length =
(__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
- if (vlan_tx_tag_present(skb)) {
+ if (skb_vlan_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
- netdev->name, vlan_tx_tag_get(skb) );
+ netdev->name,
+ skb_vlan_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
- wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+ wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
} else
wqe_misc = 0;