Diffstat (limited to 'drivers/net/wimax/i2400m/tx.c')
-rw-r--r--  drivers/net/wimax/i2400m/tx.c  75
1 file changed, 69 insertions, 6 deletions
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 613a88ffd65..fa16ccf8e26 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -278,6 +278,48 @@ enum {
#define TAIL_FULL ((void *)~(unsigned long)NULL)
/*
+ * Calculate how much tail room is available
+ *
+ * Note the trick here. This path is ONLY called for Case A (see
+ * i2400m_tx_fifo_push() below), where we have:
+ *
+ * Case A
+ * N ___________
+ * | tail room |
+ * | |
+ * |<- IN ->|
+ * | |
+ * | data |
+ * | |
+ * |<- OUT ->|
+ * | |
+ * | head room |
+ * 0 -----------
+ *
+ * When calculating the tail_room, the local tx_in (i2400m->tx_in
+ * modulo the buffer size) can be zero if i2400m->tx_in sits right at
+ * the end of the buffer (really full buffer, no head room). In that
+ * case the subtraction alone would report a tail_room of
+ * I2400M_TX_BUF_SIZE even though it is actually zero; hence the
+ * final mod (%) operation. That same modulo, however, would also turn
+ * the genuinely empty case (i2400m->tx_in == 0) into zero tail room,
+ * so we treat it as a special case up front.
+ */
+static inline
+size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
+{
+ size_t tail_room;
+ size_t tx_in;
+
+ if (unlikely(i2400m->tx_in == 0))
+ return I2400M_TX_BUF_SIZE;
+ tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
+ tail_room = I2400M_TX_BUF_SIZE - tx_in;
+ tail_room %= I2400M_TX_BUF_SIZE;
+ return tail_room;
+}
+
+
+/*
* Allocate @size bytes in the TX fifo, return a pointer to it
*
* @i2400m: device descriptor
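As a sanity check on the new helper, the wrap-around arithmetic can be exercised on its own. The following is a minimal userspace sketch, not driver code: BUF_SIZE and a bare size_t counter stand in for I2400M_TX_BUF_SIZE and i2400m->tx_in, but the modulo trick and the tx_in == 0 special case are the same.

    #include <assert.h>
    #include <stddef.h>

    #define BUF_SIZE 1024  /* hypothetical stand-in for I2400M_TX_BUF_SIZE */

    /* same arithmetic as __i2400m_tx_tail_room(), on a bare counter */
    static size_t tail_room(size_t tx_in)
    {
            size_t room;

            if (tx_in == 0)         /* nothing ever written: whole buffer free */
                    return BUF_SIZE;
            room = BUF_SIZE - tx_in % BUF_SIZE;
            room %= BUF_SIZE;       /* tx_in on a buffer boundary -> 0, not BUF_SIZE */
            return room;
    }

    int main(void)
    {
            assert(tail_room(0) == BUF_SIZE);          /* the special case */
            assert(tail_room(100) == BUF_SIZE - 100);  /* normal tail */
            assert(tail_room(BUF_SIZE) == 0);          /* really full buffer */
            assert(tail_room(3 * BUF_SIZE + 4) == BUF_SIZE - 4);
            return 0;
    }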
@@ -338,7 +380,7 @@ void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
return NULL;
}
/* Is there space at the tail? */
- tail_room = I2400M_TX_BUF_SIZE - i2400m->tx_in % I2400M_TX_BUF_SIZE;
+ tail_room = __i2400m_tx_tail_room(i2400m);
if (tail_room < needed_size) {
if (i2400m->tx_out % I2400M_TX_BUF_SIZE
< i2400m->tx_in % I2400M_TX_BUF_SIZE) {
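The tx_out/tx_in comparison above decides whether it is worth reporting TAIL_FULL: in Case A (out below in) the free space is split between the tail above the write position and the head below the read position, whereas in the wrapped layout there is only a single free gap. Below is a rough, hypothetical model of that placement decision (made-up BUF_SIZE, and it deliberately ignores the empty-versus-full ambiguity of equal wrapped positions); it illustrates the layout, not the driver's exact allocator.

    #include <assert.h>
    #include <stddef.h>

    #define BUF_SIZE 1024  /* hypothetical */

    enum region { AT_IN, AFTER_WRAP, NO_ROOM };

    static enum region where_fits(size_t tx_in, size_t tx_out, size_t needed)
    {
            size_t in = tx_in % BUF_SIZE;
            size_t out = tx_out % BUF_SIZE;

            if (out < in) {                        /* Case A: free space is split */
                    if (needed <= BUF_SIZE - in)   /* fits in the tail room */
                            return AT_IN;
                    if (needed <= out)             /* skip the tail, use head room */
                            return AFTER_WRAP;
                    return NO_ROOM;
            }
            return needed <= out - in ? AT_IN : NO_ROOM;   /* single free gap */
    }

    int main(void)
    {
            /* Case A: in=900, out=100 -> 124 bytes of tail room, 100 of head room */
            assert(where_fits(900, 100, 110) == AT_IN);
            assert(where_fits(900, 200, 150) == AFTER_WRAP);
            assert(where_fits(900, 100, 200) == NO_ROOM);
            /* wrapped layout: in=300, out=700 -> one 400-byte gap */
            assert(where_fits(300, 700, 300) == AT_IN);
            return 0;
    }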
@@ -367,17 +409,29 @@ void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
* (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the
* header).
*
+ * Tail room can get to be zero if a message was opened when there was
+ * space only for a header. _tx_close() will mark it as to-skip (as it
+ * will have no payloads) and there is no tail left to flush, so
+ * nothing has to be done here. This is probably cheaper than ensuring
+ * in _tx_new() that there is always some space for payloads, as we
+ * could still hit the same problem whenever a payload doesn't fit.
+ *
* Note:
*
* Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ *
+ * This path is only taken for Case A FIFO situations [see
+ * i2400m_tx_fifo_push()]
*/
static
void i2400m_tx_skip_tail(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
- size_t tail_room = I2400M_TX_BUF_SIZE - tx_in;
+ size_t tail_room = __i2400m_tx_tail_room(i2400m);
struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in;
+ if (unlikely(tail_room == 0))
+ return;
BUG_ON(tail_room < sizeof(*msg));
msg->size = tail_room | I2400M_TX_SKIP;
d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n",
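For illustration, here is a hedged userspace model of what the skip marker amounts to (made-up struct and flag values, nothing from the real i2400m headers): when the tail cannot hold the next allocation, a header whose size field spans the whole tail and carries a skip flag is written there, so the region is ignored when the FIFO is drained; a zero tail room, as handled by the new early return, simply means there is no tail left to mark.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define BUF_SIZE  1024          /* hypothetical */
    #define MSG_SKIP  0x80000000u   /* hypothetical stand-in for I2400M_TX_SKIP */

    struct msg_hdr {                /* hypothetical, much smaller than the real header */
            uint32_t size;
    };

    static void skip_tail(uint8_t *buf, size_t tx_in)
    {
            size_t in = tx_in % BUF_SIZE;
            size_t tail = (BUF_SIZE - in) % BUF_SIZE;
            struct msg_hdr hdr;

            if (tail == 0)                  /* no tail left to mark */
                    return;
            assert(tail >= sizeof(hdr));    /* mirrors the BUG_ON() above */
            hdr.size = (uint32_t)tail | MSG_SKIP;
            memcpy(buf + in, &hdr, sizeof(hdr));
    }

    int main(void)
    {
            uint8_t buf[BUF_SIZE] = {0};

            skip_tail(buf, 3 * BUF_SIZE + 1000);    /* marks a 24-byte tail */
            return 0;
    }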
@@ -474,10 +528,18 @@ void i2400m_tx_close(struct i2400m *i2400m)
struct i2400m_msg_hdr *tx_msg_moved;
size_t aligned_size, padding, hdr_size;
void *pad_buf;
+ unsigned num_pls;
if (tx_msg->size & I2400M_TX_SKIP) /* a skipper? nothing to do */
goto out;
-
+ num_pls = le16_to_cpu(tx_msg->num_pls);
+ /* We can get this situation when a new message was started
+ * and there was no space to add payloads before hitting the
+ * tail (and taking padding into consideration). */
+ if (num_pls == 0) {
+ tx_msg->size |= I2400M_TX_SKIP;
+ goto out;
+ }
/* Relocate the message header
*
* Find the current header size, align it to 16 and if we need
@@ -491,7 +553,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
*/
hdr_size = sizeof(*tx_msg)
+ le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
- hdr_size = ALIGN(hdr_size, I2400M_PL_PAD);
+ hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
tx_msg_moved = (void *) tx_msg + tx_msg->offset;
memmove(tx_msg_moved, tx_msg, hdr_size);
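The header relocation above is plain offset arithmetic: the message header is reserved at its maximum size (I2400M_TX_PLD_SIZE) and, once the payload count is known, the used part is aligned and slid up against the first payload. A worked example with made-up sizes follows; none of these constants are the real structure sizes, only the arithmetic matches.

    #include <stddef.h>
    #include <stdio.h>

    #define PL_ALIGN     16   /* stand-in for I2400M_PL_ALIGN */
    #define TX_PLD_SIZE  256  /* hypothetical stand-in for I2400M_TX_PLD_SIZE */
    #define HDR_BASE     16   /* hypothetical sizeof(*tx_msg) */
    #define PLD_ENTRY    4    /* hypothetical sizeof(tx_msg->pld[0]) */

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            unsigned num_pls = 3;
            size_t hdr_size = HDR_BASE + num_pls * PLD_ENTRY;   /* 28 bytes used */

            hdr_size = ALIGN_UP(hdr_size, PL_ALIGN);            /* -> 32 */
            /* header moves forward by offset so it ends where payloads start */
            printf("hdr_size %zu, offset %zu\n",
                   hdr_size, (size_t)TX_PLD_SIZE - hdr_size);   /* offset 224 */
            return 0;
    }

With zero payloads there is nothing worth aligning or moving, which is why the hunk above simply flags such a message as a skipper instead.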
@@ -574,7 +636,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
d_fnstart(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u)\n",
i2400m, buf, buf_len, pl_type);
- padded_len = ALIGN(buf_len, I2400M_PL_PAD);
+ padded_len = ALIGN(buf_len, I2400M_PL_ALIGN);
d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len);
/* If there is no current TX message, create one; if the
* current one is out of payload slots or we have a singleton,
@@ -591,6 +653,8 @@ try_new:
i2400m_tx_close(i2400m);
i2400m_tx_new(i2400m);
}
+ if (i2400m->tx_msg == NULL)
+ goto error_tx_new;
if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) {
d_printf(2, dev, "TX: message too big, going new\n");
i2400m_tx_close(i2400m);
@@ -773,7 +837,6 @@ void i2400m_tx_msg_sent(struct i2400m *i2400m)
n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
i2400m->tx_out %= I2400M_TX_BUF_SIZE;
i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
- netif_start_queue(i2400m->wimax_dev.net_dev);
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
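The counter normalization kept as context in the last hunk is easy to check by hand: the number of whole buffer lengths consumed by tx_out is subtracted from both counters, so they stay small while their difference (the bytes still queued) is preserved. A quick sketch with hypothetical numbers:

    #include <assert.h>
    #include <stddef.h>

    #define BUF_SIZE 1024  /* hypothetical */

    int main(void)
    {
            size_t tx_in = 2 * BUF_SIZE + 300, tx_out = 2 * BUF_SIZE + 100;
            size_t pending = tx_in - tx_out;        /* 200 bytes still queued */
            size_t n = tx_out / BUF_SIZE;           /* whole buffers consumed: 2 */

            tx_out %= BUF_SIZE;                     /* -> 100 */
            tx_in -= n * BUF_SIZE;                  /* -> 300 */
            assert(tx_in - tx_out == pending);      /* difference untouched */
            return 0;
    }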