author     Ralph Campbell <ralph.campbell@qlogic.com>    2008-04-16 21:09:29 -0700
committer  Roland Dreier <rolandd@cisco.com>             2008-04-16 21:09:29 -0700
commit     9355fb6a064723c71e80e9c78de3140b43bfb52d
tree       dd0fffeb6633aed6cb2c946a05bf33e05f2e9436 /drivers/infiniband/hw/ipath/ipath_intr.c
parent     2ba3f56eb402672ff83601b5990b219d39577636
IB/ipath: Add support for 7220 receive queue changes
Newer HCAs have a HW option to write a sequence number to each receive
queue entry and avoid a separate DMA of the tail register to memory.
This patch adds support for these changes.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_intr.c')
 drivers/infiniband/hw/ipath/ipath_intr.c | 46
 1 file changed, 20 insertions(+), 26 deletions(-)
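The headline change in the diff below is that direct reads of the DMA'd tail shadow, le64_to_cpu(*dd->ipath_hdrqtailptr), are replaced by calls to a new ipath_get_hdrqtail() helper, whose definition lives in another file of this patch. As a rough, hypothetical illustration of the mechanism the commit message describes — all struct and field names below are simplified stand-ins, not the driver's actual definitions:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's per-port receive state. */
struct port {
	uint32_t head;                     /* next entry software will consume */
	uint32_t seq_cnt;                  /* sequence value expected at head */
	const uint32_t *entry_seq;         /* per-entry seq numbers written by HW */
	const volatile uint64_t *dma_tail; /* legacy tail, DMA'd by the chip */
	int nodma_rtail;                   /* set when HW writes seq numbers */
};

/*
 * Hypothetical tail lookup in the spirit of ipath_get_hdrqtail():
 * with per-entry sequence numbers there is no DMA'd tail register;
 * the queue is non-empty exactly when the entry at 'head' carries
 * the sequence value software expects next.
 */
static uint32_t get_hdrqtail(const struct port *pd)
{
	if (pd->nodma_rtail)
		return pd->entry_seq[pd->head] == pd->seq_cnt ?
			pd->head + 1 : pd->head;
	/* Older HCAs: read back the tail the chip DMA'd to host memory. */
	return (uint32_t)*pd->dma_tail;
}

int main(void)
{
	uint32_t seqs[4] = { 1, 1, 0, 0 };  /* HW has filled entries 0 and 1 */
	struct port pd = {
		.head = 0, .seq_cnt = 1,
		.entry_seq = seqs, .nodma_rtail = 1,
	};

	/* head != tail, so at least one entry is ready to be processed. */
	printf("head=%u tail=%u\n", pd.head, get_hdrqtail(&pd));
	return 0;
}

The point of the head+1 convention is that the chip never has to publish a tail index at all: an entry is valid exactly when its embedded sequence number matches the value software expects next, and the expected value advances as entries are consumed.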
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 41329e72392..826b96b3995 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -695,8 +695,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 			struct ipath_portdata *pd = dd->ipath_pd[i];
 			if (i == 0) {
 				hd = pd->port_head;
-				tl = (u32) le64_to_cpu(
-					*dd->ipath_hdrqtailptr);
+				tl = ipath_get_hdrqtail(pd);
 			} else if (pd && pd->port_cnt &&
 				   pd->port_rcvhdrtail_kvaddr) {
 				/*
@@ -732,8 +731,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		 * vs user)
 		 */
		ipath_stats.sps_etidfull++;
-		if (pd->port_head !=
-		    (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
+		if (pd->port_head != ipath_get_hdrqtail(pd))
 			chkerrpkts = 1;
 	}
 
@@ -952,7 +950,7 @@ set:
  * process was waiting for a packet to arrive, and didn't want
  * to poll
  */
-static void handle_urcv(struct ipath_devdata *dd, u32 istat)
+static void handle_urcv(struct ipath_devdata *dd, u64 istat)
 {
 	u64 portr;
 	int i;
@@ -968,10 +966,10 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 	 * and ipath_poll_next()...
 	 */
 	rmb();
-	portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
-		 dd->ipath_i_rcvavail_mask)
-		| ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
-		   dd->ipath_i_rcvurg_mask);
+	portr = ((istat >> dd->ipath_i_rcvavail_shift) &
+		 dd->ipath_i_rcvavail_mask) |
+		((istat >> dd->ipath_i_rcvurg_shift) &
+		 dd->ipath_i_rcvurg_mask);
 	for (i = 1; i < dd->ipath_cfgports; i++) {
 		struct ipath_portdata *pd = dd->ipath_pd[i];
 
@@ -991,7 +989,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 	}
 	if (rcvdint) {
 		/* only want to take one interrupt, so turn off the rcv
-		 * interrupt for all the ports that we did the wakeup on
+		 * interrupt for all the ports that we set the rcv_waiting
 		 * (but never for kernel port)
 		 */
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
@@ -1006,8 +1004,7 @@ irqreturn_t ipath_intr(int irq, void *data)
 	ipath_err_t estat = 0;
 	irqreturn_t ret;
 	static unsigned unexpected = 0;
-	static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
-		(1U<<INFINIPATH_I_RCVURG_SHIFT);
+	u64 kportrbits;
 
 	ipath_stats.sps_ints++;
 
@@ -1076,9 +1073,7 @@ irqreturn_t ipath_intr(int irq, void *data)
 			ipath_dev_err(dd, "Read of error status failed "
 				      "(all bits set); ignoring\n");
 		else
-			if (handle_errors(dd, estat))
-				/* force calling ipath_kreceive() */
-				chk0rcv = 1;
+			chk0rcv |= handle_errors(dd, estat);
 	}
 
 	if (istat & INFINIPATH_I_GPIO) {
@@ -1158,7 +1153,6 @@ irqreturn_t ipath_intr(int irq, void *data)
 				(u64) to_clear);
 		}
 	}
-	chk0rcv |= istat & port0rbits;
 
 	/*
 	 * Clear the interrupt bits we found set, unless they are receive
@@ -1171,20 +1165,20 @@ irqreturn_t ipath_intr(int irq, void *data)
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 
 	/*
-	 * handle port0 receive before checking for pio buffers available,
-	 * since receives can overflow; piobuf waiters can afford a few
-	 * extra cycles, since they were waiting anyway, and user's waiting
-	 * for receive are at the bottom.
+	 * Handle kernel receive queues before checking for pio buffers
+	 * available since receives can overflow; piobuf waiters can afford
+	 * a few extra cycles, since they were waiting anyway, and user's
+	 * waiting for receive are at the bottom.
 	 */
-	if (chk0rcv) {
+	kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
+		(1ULL << dd->ipath_i_rcvurg_shift);
+	if (chk0rcv || (istat & kportrbits)) {
+		istat &= ~kportrbits;
 		ipath_kreceive(dd->ipath_pd[0]);
-		istat &= ~port0rbits;
 	}
 
-	if (istat & ((dd->ipath_i_rcvavail_mask <<
-		      INFINIPATH_I_RCVAVAIL_SHIFT)
-		     | (dd->ipath_i_rcvurg_mask <<
-			INFINIPATH_I_RCVURG_SHIFT)))
+	if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
+		     (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
 		handle_urcv(dd, istat);
 
 	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
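A second thread running through the diff: the interrupt status word istat widens from u32 to u64, and the compile-time INFINIPATH_I_RCVAVAIL_SHIFT / INFINIPATH_I_RCVURG_SHIFT constants give way to per-device ipath_i_rcvavail_shift / ipath_i_rcvurg_shift fields, which lets one handler serve chips whose status registers lay the receive bits out differently. A minimal, self-contained sketch of that pattern follows; the shift and mask values are invented for illustration, not the real 6110/6120/7220 layouts:

#include <stdint.h>
#include <stdio.h>

/* Per-chip interrupt-status layout, filled in at chip init time. */
struct devdata {
	unsigned rcvavail_shift;
	uint64_t rcvavail_mask;
	unsigned rcvurg_shift;
	uint64_t rcvurg_mask;
};

/* Gather per-port receive bits from a raw status word, mirroring
 * the portr computation in the new handle_urcv(). */
static uint64_t port_rcv_bits(const struct devdata *dd, uint64_t istat)
{
	return ((istat >> dd->rcvavail_shift) & dd->rcvavail_mask) |
	       ((istat >> dd->rcvurg_shift) & dd->rcvurg_mask);
}

int main(void)
{
	/* Invented layout: 5 avail bits at 12, 5 urgent bits at 32. */
	struct devdata dd = { 12, 0x1f, 32, 0x1f };
	uint64_t istat = (1ULL << (12 + 2)) | (1ULL << (32 + 4));

	/* Prints 0x14: port 2 has data available, port 4 has urgent data. */
	printf("portr = 0x%llx\n",
	       (unsigned long long)port_rcv_bits(&dd, istat));
	return 0;
}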