Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/block.c    | 681
-rw-r--r--  drivers/mmc/card/mmc_test.c | 498
-rw-r--r--  drivers/mmc/card/queue.c    | 217
-rw-r--r--  drivers/mmc/card/queue.h    |  33
4 files changed, 1112 insertions(+), 317 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f85e4222455..1ff5486213f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -106,6 +106,16 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_RETRY,
+ MMC_BLK_RETRY_SINGLE,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_ABORT,
+};
+
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
@@ -427,14 +437,6 @@ static const struct block_device_operations mmc_bdops = {
#endif
};
-struct mmc_blk_request {
- struct mmc_request mrq;
- struct mmc_command sbc;
- struct mmc_command cmd;
- struct mmc_command stop;
- struct mmc_data data;
-};
-
static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md)
{
@@ -525,7 +527,20 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
return result;
}
-static u32 get_card_status(struct mmc_card *card, struct request *req)
+static int send_stop(struct mmc_card *card, u32 *status)
+{
+ struct mmc_command cmd = {0};
+ int err;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
struct mmc_command cmd = {0};
int err;
@@ -534,11 +549,141 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ err = mmc_wait_for_cmd(card->host, &cmd, retries);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+#define ERR_RETRY 2
+#define ERR_ABORT 1
+#define ERR_CONTINUE 0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+ bool status_valid, u32 status)
+{
+ switch (error) {
+ case -EILSEQ:
+ /* response crc error, retry the r/w cmd */
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "response CRC error",
+ name, status);
+ return ERR_RETRY;
+
+ case -ETIMEDOUT:
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "timed out", name, status);
+
+ /* If the status cmd initially failed, retry the r/w cmd */
+ if (!status_valid)
+ return ERR_RETRY;
+
+ /*
+ * If it was a r/w cmd crc error, or illegal command
+ * (eg, issued in wrong state) then retry - we should
+ * have corrected the state problem above.
+ */
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ return ERR_RETRY;
+
+ /* Otherwise abort the command */
+ return ERR_ABORT;
+
+ default:
+ /* We don't understand the error code the driver gave us */
+ pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+ req->rq_disk->disk_name, error, status);
+ return ERR_ABORT;
+ }
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state. Essentially, we do this as follows:
+ * - Obtain card status. If the first attempt to obtain card status fails,
+ * the status word will reflect the failed status cmd, not the failed
+ * r/w cmd. If we fail to obtain card status, it suggests we can no
+ * longer communicate with the card.
+ * - Check the card state. If the card received the cmd but there was a
+ * transient problem with the response, it might still be in a data transfer
+ * mode. Try to send it a stop command. If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ * transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ * illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+ struct mmc_blk_request *brq)
+{
+ bool prev_cmd_status_valid = true;
+ u32 status, stop_status = 0;
+ int err, retry;
+
+ /*
+ * Try to get card status which indicates both the card state
+ * and why there was no response. If the first attempt fails,
+ * we can't be sure the returned status is for the r/w command.
+ */
+ for (retry = 2; retry >= 0; retry--) {
+ err = get_card_status(card, &status, 0);
+ if (!err)
+ break;
+
+ prev_cmd_status_valid = false;
+ pr_err("%s: error %d sending status command, %sing\n",
+ req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+ }
+
+ /* We couldn't get a response from the card. Give up. */
if (err)
- printk(KERN_ERR "%s: error %d sending status command",
- req->rq_disk->disk_name, err);
- return cmd.resp[0];
+ return ERR_ABORT;
+
+ /*
+ * Check the current card state. If it is in some data transfer
+ * mode, tell it to stop (and hopefully transition back to TRAN).
+ */
+ if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+ R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+ err = send_stop(card, &stop_status);
+ if (err)
+ pr_err("%s: error %d sending stop command\n",
+ req->rq_disk->disk_name, err);
+
+ /*
+ * If the stop cmd also timed out, the card is probably
+ * not present, so abort. Other errors are bad news too.
+ */
+ if (err)
+ return ERR_ABORT;
+ }
+
+ /* Check for set block count errors */
+ if (brq->sbc.error)
+ return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+ prev_cmd_status_valid, status);
+
+ /* Check for r/w command errors */
+ if (brq->cmd.error)
+ return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+ prev_cmd_status_valid, status);
+
+ /* Now for stop errors. These aren't fatal to the transfer. */
+ pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->stop.error,
+ brq->cmd.resp[0], status);
+
+ /*
+ * Substitute in our own stop status as this will give the error
+ * state which happened during the execution of the r/w command.
+ */
+ if (stop_status) {
+ brq->stop.resp[0] = stop_status;
+ brq->stop.error = 0;
+ }
+ return ERR_CONTINUE;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
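
The recovery path above keys off the card-state field of the R1 status word: bits 12:9 encode the current state of the card's state machine, and only the data-transfer states (data/rcv) warrant sending CMD12. A minimal standalone model of that check, assuming the standard SD/MMC R1 layout; the names below are illustrative, not the kernel's:

#include <stdio.h>
#include <stdint.h>

/* Card states as encoded in R1 status bits 12:9 (SD/MMC state machine). */
enum card_state {
	STATE_IDLE = 0, STATE_READY, STATE_IDENT, STATE_STBY,
	STATE_TRAN, STATE_DATA, STATE_RCV, STATE_PRG, STATE_DIS,
};

static unsigned int current_state(uint32_t r1)
{
	return (r1 >> 9) & 0xF;		/* the field R1_CURRENT_STATE() extracts */
}

/* A stop command is only useful while the card is still moving data. */
static int needs_stop(uint32_t r1)
{
	unsigned int s = current_state(r1);
	return s == STATE_DATA || s == STATE_RCV;
}

int main(void)
{
	uint32_t sample = 5u << 9;	/* hypothetical status word: card in DATA state */
	printf("state=%u needs_stop=%d\n", current_state(sample), needs_stop(sample));
	return 0;
}
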
@@ -669,240 +814,324 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
}
}
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+#define CMD_ERRORS \
+ (R1_OUT_OF_RANGE | /* Command argument out of range */ \
+ R1_ADDRESS_ERROR | /* Misaligned address */ \
+ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
+ R1_WP_VIOLATION | /* Tried to write to protected block */ \
+ R1_CC_ERROR | /* Card controller error */ \
+ R1_ERROR) /* General/unknown error */
+
+static int mmc_blk_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
{
- struct mmc_blk_data *md = mq->data;
- struct mmc_card *card = md->queue.card;
- struct mmc_blk_request brq;
- int ret = 1, disable_multi = 0;
+ enum mmc_blk_status ret = MMC_BLK_SUCCESS;
+ struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct mmc_blk_request *brq = &mq_mrq->brq;
+ struct request *req = mq_mrq->req;
/*
- * Reliable writes are used to implement Forced Unit Access and
- * REQ_META accesses, and are supported only on MMCs.
+ * sbc.error indicates a problem with the set block count
+ * command. No data will have been transferred.
+ *
+ * cmd.error indicates a problem with the r/w command. No
+ * data will have been transferred.
+ *
+ * stop.error indicates a problem with the stop command. Data
+ * may have been transferred, or may still be transferring.
*/
- bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
- (req->cmd_flags & REQ_META)) &&
- (rq_data_dir(req) == WRITE) &&
- (md->flags & MMC_BLK_REL_WR);
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
+ switch (mmc_blk_cmd_recovery(card, req, brq)) {
+ case ERR_RETRY:
+ return MMC_BLK_RETRY;
+ case ERR_ABORT:
+ return MMC_BLK_ABORT;
+ case ERR_CONTINUE:
+ break;
+ }
+ }
- do {
- struct mmc_command cmd = {0};
- u32 readcmd, writecmd, status = 0;
-
- memset(&brq, 0, sizeof(struct mmc_blk_request));
- brq.mrq.cmd = &brq.cmd;
- brq.mrq.data = &brq.data;
-
- brq.cmd.arg = blk_rq_pos(req);
- if (!mmc_card_blockaddr(card))
- brq.cmd.arg <<= 9;
- brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- brq.data.blksz = 512;
- brq.stop.opcode = MMC_STOP_TRANSMISSION;
- brq.stop.arg = 0;
- brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- brq.data.blocks = blk_rq_sectors(req);
+ /*
+ * Check for errors relating to the execution of the
+ * initial command - such as address errors. No data
+ * has been transferred.
+ */
+ if (brq->cmd.resp[0] & CMD_ERRORS) {
+ pr_err("%s: r/w command failed, status = %#x\n",
+ req->rq_disk->disk_name, brq->cmd.resp[0]);
+ return MMC_BLK_ABORT;
+ }
- /*
- * The block layer doesn't support all sector count
- * restrictions, so we need to be prepared for too big
- * requests.
- */
- if (brq.data.blocks > card->host->max_blk_count)
- brq.data.blocks = card->host->max_blk_count;
+ /*
+ * Everything else is either success, or a data error of some
+ * kind. If it was a write, we may have transitioned to
+ * program mode, which we have to wait for to complete.
+ */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+ u32 status;
+ do {
+ int err = get_card_status(card, &status, 5);
+ if (err) {
+ printk(KERN_ERR "%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_CMD_ERR;
+ }
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!(status & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ }
- /*
- * After a read error, we redo the request one sector at a time
- * in order to accurately determine which sectors can be read
- * successfully.
- */
- if (disable_multi && brq.data.blocks > 1)
- brq.data.blocks = 1;
+ if (brq->data.error) {
+ pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->data.error,
+ (unsigned)blk_rq_pos(req),
+ (unsigned)blk_rq_sectors(req),
+ brq->cmd.resp[0], brq->stop.resp[0]);
- if (brq.data.blocks > 1 || do_rel_wr) {
- /* SPI multiblock writes terminate using a special
- * token, not a STOP_TRANSMISSION request.
- */
- if (!mmc_host_is_spi(card->host) ||
- rq_data_dir(req) == READ)
- brq.mrq.stop = &brq.stop;
- readcmd = MMC_READ_MULTIPLE_BLOCK;
- writecmd = MMC_WRITE_MULTIPLE_BLOCK;
- } else {
- brq.mrq.stop = NULL;
- readcmd = MMC_READ_SINGLE_BLOCK;
- writecmd = MMC_WRITE_BLOCK;
- }
if (rq_data_dir(req) == READ) {
- brq.cmd.opcode = readcmd;
- brq.data.flags |= MMC_DATA_READ;
+ if (brq->data.blocks > 1) {
+ /* Redo read one sector at a time */
+ pr_warning("%s: retrying using single block read\n",
+ req->rq_disk->disk_name);
+ return MMC_BLK_RETRY_SINGLE;
+ }
+ return MMC_BLK_DATA_ERR;
} else {
- brq.cmd.opcode = writecmd;
- brq.data.flags |= MMC_DATA_WRITE;
+ return MMC_BLK_CMD_ERR;
}
+ }
- if (do_rel_wr)
- mmc_apply_rel_rw(&brq, card, req);
+ if (ret == MMC_BLK_SUCCESS &&
+ blk_rq_bytes(req) != brq->data.bytes_xfered)
+ ret = MMC_BLK_PARTIAL;
- /*
- * Pre-defined multi-block transfers are preferable to
- * open ended-ones (and necessary for reliable writes).
- * However, it is not sufficient to just send CMD23,
- * and avoid the final CMD12, as on an error condition
- * CMD12 (stop) needs to be sent anyway. This, coupled
- * with Auto-CMD23 enhancements provided by some
- * hosts, means that the complexity of dealing
- * with this is best left to the host. If CMD23 is
- * supported by card and host, we'll fill sbc in and let
- * the host deal with handling it correctly. This means
- * that for hosts that don't expose MMC_CAP_CMD23, no
- * change of behavior will be observed.
- *
- * N.B: Some MMC cards experience perf degradation.
- * We'll avoid using CMD23-bounded multiblock writes for
- * these, while retaining features like reliable writes.
- */
+ return ret;
+}
- if ((md->flags & MMC_BLK_CMD23) &&
- mmc_op_multi(brq.cmd.opcode) &&
- (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
- brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
- brq.sbc.arg = brq.data.blocks |
- (do_rel_wr ? (1 << 31) : 0);
- brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
- brq.mrq.sbc = &brq.sbc;
- }
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ int disable_multi,
+ struct mmc_queue *mq)
+{
+ u32 readcmd, writecmd;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct mmc_blk_data *md = mq->data;
- mmc_set_data_timeout(&brq.data, card);
+ /*
+ * Reliable writes are used to implement Forced Unit Access and
+ * REQ_META accesses, and are supported only on MMCs.
+ */
+ bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+ (req->cmd_flags & REQ_META)) &&
+ (rq_data_dir(req) == WRITE) &&
+ (md->flags & MMC_BLK_REL_WR);
- brq.data.sg = mq->sg;
- brq.data.sg_len = mmc_queue_map_sg(mq);
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
- /*
- * Adjust the sg list so it is the same size as the
- * request.
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq->data.blksz = 512;
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ brq->data.blocks = blk_rq_sectors(req);
+
+ /*
+ * The block layer doesn't support all sector count
+ * restrictions, so we need to be prepared for too big
+ * requests.
+ */
+ if (brq->data.blocks > card->host->max_blk_count)
+ brq->data.blocks = card->host->max_blk_count;
+
+ /*
+ * After a read error, we redo the request one sector at a time
+ * in order to accurately determine which sectors can be read
+ * successfully.
+ */
+ if (disable_multi && brq->data.blocks > 1)
+ brq->data.blocks = 1;
+
+ if (brq->data.blocks > 1 || do_rel_wr) {
+ /* SPI multiblock writes terminate using a special
+ * token, not a STOP_TRANSMISSION request.
*/
- if (brq.data.blocks != blk_rq_sectors(req)) {
- int i, data_size = brq.data.blocks << 9;
- struct scatterlist *sg;
-
- for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
- data_size -= sg->length;
- if (data_size <= 0) {
- sg->length += data_size;
- i++;
- break;
- }
- }
- brq.data.sg_len = i;
- }
+ if (!mmc_host_is_spi(card->host) ||
+ rq_data_dir(req) == READ)
+ brq->mrq.stop = &brq->stop;
+ readcmd = MMC_READ_MULTIPLE_BLOCK;
+ writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+ } else {
+ brq->mrq.stop = NULL;
+ readcmd = MMC_READ_SINGLE_BLOCK;
+ writecmd = MMC_WRITE_BLOCK;
+ }
+ if (rq_data_dir(req) == READ) {
+ brq->cmd.opcode = readcmd;
+ brq->data.flags |= MMC_DATA_READ;
+ } else {
+ brq->cmd.opcode = writecmd;
+ brq->data.flags |= MMC_DATA_WRITE;
+ }
- mmc_queue_bounce_pre(mq);
+ if (do_rel_wr)
+ mmc_apply_rel_rw(brq, card, req);
- mmc_wait_for_req(card->host, &brq.mrq);
+ /*
+ * Pre-defined multi-block transfers are preferable to
+ * open ended-ones (and necessary for reliable writes).
+ * However, it is not sufficient to just send CMD23,
+ * and avoid the final CMD12, as on an error condition
+ * CMD12 (stop) needs to be sent anyway. This, coupled
+ * with Auto-CMD23 enhancements provided by some
+ * hosts, means that the complexity of dealing
+ * with this is best left to the host. If CMD23 is
+ * supported by card and host, we'll fill sbc in and let
+ * the host deal with handling it correctly. This means
+ * that for hosts that don't expose MMC_CAP_CMD23, no
+ * change of behavior will be observed.
+ *
+ * N.B: Some MMC cards experience perf degradation.
+ * We'll avoid using CMD23-bounded multiblock writes for
+ * these, while retaining features like reliable writes.
+ */
- mmc_queue_bounce_post(mq);
+ if ((md->flags & MMC_BLK_CMD23) &&
+ mmc_op_multi(brq->cmd.opcode) &&
+ (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = brq->data.blocks |
+ (do_rel_wr ? (1 << 31) : 0);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ brq->mrq.sbc = &brq->sbc;
+ }
- /*
- * Check for errors here, but don't jump to cmd_err
- * until later as we need to wait for the card to leave
- * programming mode even when things go wrong.
- */
- if (brq.sbc.error || brq.cmd.error ||
- brq.data.error || brq.stop.error) {
- if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
- /* Redo read one sector at a time */
- printk(KERN_WARNING "%s: retrying using single "
- "block read\n", req->rq_disk->disk_name);
- disable_multi = 1;
- continue;
- }
- status = get_card_status(card, req);
- }
+ mmc_set_data_timeout(&brq->data, card);
- if (brq.sbc.error) {
- printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
- "command, response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.sbc.error,
- brq.sbc.resp[0], status);
- }
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
- if (brq.cmd.error) {
- printk(KERN_ERR "%s: error %d sending read/write "
- "command, response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.cmd.error,
- brq.cmd.resp[0], status);
+ /*
+ * Adjust the sg list so it is the same size as the
+ * request.
+ */
+ if (brq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = brq->data.blocks << 9;
+ struct scatterlist *sg;
+
+ for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+ data_size -= sg->length;
+ if (data_size <= 0) {
+ sg->length += data_size;
+ i++;
+ break;
+ }
}
+ brq->data.sg_len = i;
+ }
- if (brq.data.error) {
- if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
- /* 'Stop' response contains card status */
- status = brq.mrq.stop->resp[0];
- printk(KERN_ERR "%s: error %d transferring data,"
- " sector %u, nr %u, card status %#x\n",
- req->rq_disk->disk_name, brq.data.error,
- (unsigned)blk_rq_pos(req),
- (unsigned)blk_rq_sectors(req), status);
- }
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_err_check;
- if (brq.stop.error) {
- printk(KERN_ERR "%s: error %d sending stop command, "
- "response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.stop.error,
- brq.stop.resp[0], status);
- }
+ mmc_queue_bounce_pre(mqrq);
+}
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- do {
- int err;
-
- cmd.opcode = MMC_SEND_STATUS;
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 5);
- if (err) {
- printk(KERN_ERR "%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- goto cmd_err;
- }
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(cmd.resp[0]) == 7));
-
-#if 0
- if (cmd.resp[0] & ~0x00000900)
- printk(KERN_ERR "%s: status = %08x\n",
- req->rq_disk->disk_name, cmd.resp[0]);
- if (mmc_decode_status(cmd.resp))
- goto cmd_err;
-#endif
- }
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+ int ret = 1, disable_multi = 0, retry = 0;
+ enum mmc_blk_status status;
+ struct mmc_queue_req *mq_rq;
+ struct request *req;
+ struct mmc_async_req *areq;
+
+ if (!rqc && !mq->mqrq_prev->req)
+ return 0;
- if (brq.cmd.error || brq.stop.error || brq.data.error) {
- if (rq_data_dir(req) == READ) {
+ do {
+ if (rqc) {
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ areq = &mq->mqrq_cur->mmc_active;
+ } else
+ areq = NULL;
+ areq = mmc_start_req(card->host, areq, (int *) &status);
+ if (!areq)
+ return 0;
+
+ mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+ brq = &mq_rq->brq;
+ req = mq_rq->req;
+ mmc_queue_bounce_post(mq_rq);
+
+ switch (status) {
+ case MMC_BLK_SUCCESS:
+ case MMC_BLK_PARTIAL:
+ /*
+ * A block was successfully transferred.
+ */
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0,
+ brq->data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ if (status == MMC_BLK_SUCCESS && ret) {
/*
- * After an error, we redo I/O one sector at a
- * time, so we only reach here after trying to
- * read a single sector.
+ * blk_end_request() returned non-zero even
+ * though all the data was transferred and
+ * no errors were reported by the host.
+ * If this happens it's a bug.
*/
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, -EIO, brq.data.blksz);
- spin_unlock_irq(&md->lock);
- continue;
+ printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+ __func__, blk_rq_bytes(req),
+ brq->data.bytes_xfered);
+ rqc = NULL;
+ goto cmd_abort;
}
+ break;
+ case MMC_BLK_CMD_ERR:
goto cmd_err;
+ case MMC_BLK_RETRY_SINGLE:
+ disable_multi = 1;
+ break;
+ case MMC_BLK_RETRY:
+ if (retry++ < 5)
+ break;
+ case MMC_BLK_ABORT:
+ goto cmd_abort;
+ case MMC_BLK_DATA_ERR:
+ /*
+ * After an error, we redo I/O one sector at a
+ * time, so we only reach here after trying to
+ * read a single sector.
+ */
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, -EIO,
+ brq->data.blksz);
+ spin_unlock_irq(&md->lock);
+ if (!ret)
+ goto start_new_req;
+ break;
}
- /*
- * A block was successfully transferred.
- */
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ if (ret) {
+ /*
+ * In case of an incomplete request,
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ }
} while (ret);
return 1;
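
The issue loop above layers a small retry policy on the statuses produced by mmc_blk_err_check(): transient command errors are retried up to five times, a failed multi-block read is redone one sector at a time, a single-sector data error is completed with -EIO so the rest of the request can continue, and anything else aborts. A rough standalone sketch of that policy; the request model and stub below are simplified stand-ins, not the driver's types:

#include <stdio.h>
#include <stdbool.h>

enum blk_status { BLK_SUCCESS, BLK_PARTIAL, BLK_RETRY, BLK_RETRY_SINGLE,
		  BLK_DATA_ERR, BLK_CMD_ERR, BLK_ABORT };

/* Stand-in for issuing one chunk of a request and classifying the result.
 * Here the first multi-block attempt hits a data error, after which
 * single-block reads succeed. */
static enum blk_status issue_chunk(int attempt, bool single)
{
	if (!single && attempt == 0)
		return BLK_RETRY_SINGLE;
	return BLK_SUCCESS;
}

int main(void)
{
	int retry = 0, attempt = 0;
	bool single = false, done = false, aborted = false;

	while (!done && !aborted) {
		switch (issue_chunk(attempt++, single)) {
		case BLK_SUCCESS:
		case BLK_PARTIAL:
			done = true;		/* complete the transferred bytes */
			break;
		case BLK_RETRY_SINGLE:
			single = true;		/* redo the read one sector at a time */
			break;
		case BLK_RETRY:
			if (retry++ < 5)
				break;		/* resend the same request */
			/* more than 5 retries: fall through and abort */
		case BLK_CMD_ERR:
		case BLK_ABORT:
			aborted = true;		/* fail the remaining sectors */
			break;
		case BLK_DATA_ERR:
			done = true;		/* end the failing sector with -EIO */
			break;
		}
	}
	printf("%s after %d attempt(s)\n", aborted ? "aborted" : "completed", attempt);
	return 0;
}
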
@@ -927,15 +1156,22 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
}
} else {
spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+ ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
spin_unlock_irq(&md->lock);
}
+ cmd_abort:
spin_lock_irq(&md->lock);
while (ret)
ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
spin_unlock_irq(&md->lock);
+ start_new_req:
+ if (rqc) {
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+ }
+
return 0;
}
@@ -945,26 +1181,37 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
- mmc_claim_host(card->host);
+ if (req && !mq->mqrq_prev->req)
+ /* claim host only for the first request */
+ mmc_claim_host(card->host);
+
ret = mmc_blk_part_switch(card, md);
if (ret) {
ret = 0;
goto out;
}
- if (req->cmd_flags & REQ_DISCARD) {
+ if (req && req->cmd_flags & REQ_DISCARD) {
+ /* complete ongoing async transfer before issuing discard */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
if (req->cmd_flags & REQ_SECURE)
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (req->cmd_flags & REQ_FLUSH) {
+ } else if (req && req->cmd_flags & REQ_FLUSH) {
+ /* complete ongoing async transfer before issuing flush */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
ret = mmc_blk_issue_flush(mq, req);
} else {
ret = mmc_blk_issue_rw_rq(mq, req);
}
out:
- mmc_release_host(card->host);
+ if (!req)
+ /* release host only when there are no more requests */
+ mmc_release_host(card->host);
return ret;
}
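
Taken together, block.c now keeps two mmc_queue_req slots per queue: while one request is executing on the host controller, the next one is prepared (sg mapping, bounce-buffer pre-copy, CMD23 setup), and mmc_start_req() hands back the previously completed request for post-processing. A stripped-down standalone model of that two-slot rotation; all names are illustrative, and the stubs stand in for request preparation and the host transfer:

#include <stdio.h>
#include <stddef.h>

struct slot { int id; };

static void prepare(struct slot *s, int id)
{
	s->id = id;		/* stands in for sg mapping / bounce pre-copy */
}

/* Submit 'next' (may be NULL) and return the previously completed request. */
static struct slot *start_req(struct slot *next, struct slot **in_flight)
{
	struct slot *done = *in_flight;	/* finished while 'next' was being prepared */
	*in_flight = next;
	return done;
}

int main(void)
{
	struct slot slots[2], *cur = &slots[0], *prev = &slots[1], *in_flight = NULL;

	for (int req = 0; req < 4; req++) {
		prepare(cur, req);	/* overlaps with the request in flight */
		struct slot *done = start_req(cur, &in_flight);
		if (done)
			printf("completed request %d\n", done->id);
		/* current request becomes previous request and vice versa */
		struct slot *tmp = prev; prev = cur; cur = tmp;
	}
	/* Drain: submit nothing and collect the last in-flight request. */
	struct slot *done = start_req(NULL, &in_flight);
	if (done)
		printf("completed request %d\n", done->id);
	return 0;
}

The same rotation shows up again in mmc_test.c's non-blocking test loop and in queue.c's thread, which swaps mqrq_cur and mqrq_prev after every issue.
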
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 233cdfae92f..006a5e9f8ab 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -148,6 +148,27 @@ struct mmc_test_card {
struct mmc_test_general_result *gr;
};
+enum mmc_test_prep_media {
+ MMC_TEST_PREP_NONE = 0,
+ MMC_TEST_PREP_WRITE_FULL = 1 << 0,
+ MMC_TEST_PREP_ERASE = 1 << 1,
+};
+
+struct mmc_test_multiple_rw {
+ unsigned int *sg_len;
+ unsigned int *bs;
+ unsigned int len;
+ unsigned int size;
+ bool do_write;
+ bool do_nonblock_req;
+ enum mmc_test_prep_media prepare;
+};
+
+struct mmc_test_async_req {
+ struct mmc_async_req areq;
+ struct mmc_test_card *test;
+};
+
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
@@ -367,21 +388,26 @@ out_free:
* Map memory into a scatterlist. Optionally allow the same memory to be
* mapped more than once.
*/
-static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
struct scatterlist *sglist, int repeat,
unsigned int max_segs, unsigned int max_seg_sz,
- unsigned int *sg_len)
+ unsigned int *sg_len, int min_sg_len)
{
struct scatterlist *sg = NULL;
unsigned int i;
+ unsigned long sz = size;
sg_init_table(sglist, max_segs);
+ if (min_sg_len > max_segs)
+ min_sg_len = max_segs;
*sg_len = 0;
do {
for (i = 0; i < mem->cnt; i++) {
unsigned long len = PAGE_SIZE << mem->arr[i].order;
+ if (min_sg_len && (size / min_sg_len < len))
+ len = ALIGN(size / min_sg_len, 512);
if (len > sz)
len = sz;
if (len > max_seg_sz)
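
The new min_sg_len argument lets the sg-length performance tests further below request a mapping with roughly a given number of entries: each chunk is capped at size / min_sg_len, rounded up to a 512-byte multiple, so the list splits into about min_sg_len segments. A quick worked example with made-up numbers:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long size = 1024 * 1024;	/* hypothetical transfer size: 1 MiB */
	unsigned int min_sg_len = 3;
	unsigned long cap = ALIGN_UP(size / min_sg_len, 512);
	unsigned long nr_segs = (size + cap - 1) / cap;

	printf("per-segment cap %lu bytes -> %lu segments\n", cap, nr_segs);
	/* prints: per-segment cap 349696 bytes -> 3 segments */
	return 0;
}
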
@@ -554,11 +580,12 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
"%lu.%09lu seconds (%u kB/s, %u KiB/s, "
- "%u.%02u IOPS)\n",
+ "%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
- rate / 1000, rate / 1024, iops / 100, iops % 100);
+ rate / 1000, rate / 1024, iops / 100, iops % 100,
+ test->area.sg_len);
mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
@@ -661,7 +688,7 @@ static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
* Checks that a normal transfer didn't have any errors
*/
static int mmc_test_check_result(struct mmc_test_card *test,
- struct mmc_request *mrq)
+ struct mmc_request *mrq)
{
int ret;
@@ -685,6 +712,17 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
+static int mmc_test_check_result_async(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_test_async_req *test_async =
+ container_of(areq, struct mmc_test_async_req, areq);
+
+ mmc_test_wait_busy(test_async->test);
+
+ return mmc_test_check_result(test_async->test, areq->mrq);
+}
+
/*
* Checks that a "short transfer" behaved as expected
*/
@@ -720,6 +758,85 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,
}
/*
+ * Tests nonblock transfer with certain parameters
+ */
+static void mmc_test_nonblock_reset(struct mmc_request *mrq,
+ struct mmc_command *cmd,
+ struct mmc_command *stop,
+ struct mmc_data *data)
+{
+ memset(mrq, 0, sizeof(struct mmc_request));
+ memset(cmd, 0, sizeof(struct mmc_command));
+ memset(data, 0, sizeof(struct mmc_data));
+ memset(stop, 0, sizeof(struct mmc_command));
+
+ mrq->cmd = cmd;
+ mrq->data = data;
+ mrq->stop = stop;
+}
+static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
+ struct scatterlist *sg, unsigned sg_len,
+ unsigned dev_addr, unsigned blocks,
+ unsigned blksz, int write, int count)
+{
+ struct mmc_request mrq1;
+ struct mmc_command cmd1;
+ struct mmc_command stop1;
+ struct mmc_data data1;
+
+ struct mmc_request mrq2;
+ struct mmc_command cmd2;
+ struct mmc_command stop2;
+ struct mmc_data data2;
+
+ struct mmc_test_async_req test_areq[2];
+ struct mmc_async_req *done_areq;
+ struct mmc_async_req *cur_areq = &test_areq[0].areq;
+ struct mmc_async_req *other_areq = &test_areq[1].areq;
+ int i;
+ int ret;
+
+ test_areq[0].test = test;
+ test_areq[1].test = test;
+
+ mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
+ mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
+
+ cur_areq->mrq = &mrq1;
+ cur_areq->err_check = mmc_test_check_result_async;
+ other_areq->mrq = &mrq2;
+ other_areq->err_check = mmc_test_check_result_async;
+
+ for (i = 0; i < count; i++) {
+ mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
+ blocks, blksz, write);
+ done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
+
+ if (ret || (!done_areq && i > 0))
+ goto err;
+
+ if (done_areq) {
+ if (done_areq->mrq == &mrq2)
+ mmc_test_nonblock_reset(&mrq2, &cmd2,
+ &stop2, &data2);
+ else
+ mmc_test_nonblock_reset(&mrq1, &cmd1,
+ &stop1, &data1);
+ }
+ done_areq = cur_areq;
+ cur_areq = other_areq;
+ other_areq = done_areq;
+ dev_addr += blocks;
+ }
+
+ done_areq = mmc_start_req(test->card->host, NULL, &ret);
+
+ return ret;
+err:
+ return ret;
+}
+
+/*
* Tests a basic transfer with certain parameters
*/
static int mmc_test_simple_transfer(struct mmc_test_card *test,
@@ -1302,7 +1419,7 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)
* Map sz bytes so that it can be transferred.
*/
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
- int max_scatter)
+ int max_scatter, int min_sg_len)
{
struct mmc_test_area *t = &test->area;
int err;
@@ -1315,7 +1432,7 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
&t->sg_len);
} else {
err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
- t->max_seg_sz, &t->sg_len);
+ t->max_seg_sz, &t->sg_len, min_sg_len);
}
if (err)
printk(KERN_INFO "%s: Failed to map sg list\n",
@@ -1336,14 +1453,17 @@ static int mmc_test_area_transfer(struct mmc_test_card *test,
}
/*
- * Map and transfer bytes.
+ * Map and transfer bytes for multiple transfers.
*/
-static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
- unsigned int dev_addr, int write, int max_scatter,
- int timed)
+static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write,
+ int max_scatter, int timed, int count,
+ bool nonblock, int min_sg_len)
{
struct timespec ts1, ts2;
- int ret;
+ int ret = 0;
+ int i;
+ struct mmc_test_area *t = &test->area;
/*
* In the case of a maximally scattered transfer, the maximum transfer
@@ -1361,14 +1481,21 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
sz = max_tfr;
}
- ret = mmc_test_area_map(test, sz, max_scatter);
+ ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
if (ret)
return ret;
if (timed)
getnstimeofday(&ts1);
+ if (nonblock)
+ ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
+ dev_addr, t->blocks, 512, write, count);
+ else
+ for (i = 0; i < count && ret == 0; i++) {
+ ret = mmc_test_area_transfer(test, dev_addr, write);
+ dev_addr += sz >> 9;
+ }
- ret = mmc_test_area_transfer(test, dev_addr, write);
if (ret)
return ret;
@@ -1376,11 +1503,19 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
getnstimeofday(&ts2);
if (timed)
- mmc_test_print_rate(test, sz, &ts1, &ts2);
+ mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
return 0;
}
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write, int max_scatter,
+ int timed)
+{
+ return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
+ timed, 1, false, 0);
+}
+
/*
* Write the test area entirely.
*/
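
mmc_test_area_io_seq() drives count back-to-back transfers of sz bytes each, optionally through the non-blocking path, and reports the average rate over the whole run. The multiple-transfer tests further below pick count = size / reqsize, so smaller request sizes exercise many more submissions over the same amount of data. A quick illustration, assuming a 128 MiB test area (the actual TEST_AREA_MAX_SIZE may differ):

#include <stdio.h>

int main(void)
{
	unsigned long size = 128UL * 1024 * 1024;	/* assumed test-area size */
	unsigned int bs[] = { 1 << 12, 1 << 16, 1 << 20, 1 << 22 };

	for (unsigned int i = 0; i < sizeof(bs) / sizeof(bs[0]); i++)
		printf("reqsize %7u bytes -> %6lu requests\n", bs[i], size / bs[i]);
	return 0;
}
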
@@ -1954,6 +2089,245 @@ static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
return mmc_test_large_seq_perf(test, 1);
}
+static int mmc_test_rw_multiple(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *tdata,
+ unsigned int reqsize, unsigned int size,
+ int min_sg_len)
+{
+ unsigned int dev_addr;
+ struct mmc_test_area *t = &test->area;
+ int ret = 0;
+
+ /* Set up test area */
+ if (size > mmc_test_capacity(test->card) / 2 * 512)
+ size = mmc_test_capacity(test->card) / 2 * 512;
+ if (reqsize > t->max_tfr)
+ reqsize = t->max_tfr;
+ dev_addr = mmc_test_capacity(test->card) / 4;
+ if ((dev_addr & 0xffff0000))
+ dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+ else
+ dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
+ if (!dev_addr)
+ goto err;
+
+ if (reqsize > size)
+ return 0;
+
+ /* prepare test area */
+ if (mmc_can_erase(test->card) &&
+ tdata->prepare & MMC_TEST_PREP_ERASE) {
+ ret = mmc_erase(test->card, dev_addr,
+ size / 512, MMC_SECURE_ERASE_ARG);
+ if (ret)
+ ret = mmc_erase(test->card, dev_addr,
+ size / 512, MMC_ERASE_ARG);
+ if (ret)
+ goto err;
+ }
+
+ /* Run test */
+ ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
+ tdata->do_write, 0, 1, size / reqsize,
+ tdata->do_nonblock_req, min_sg_len);
+ if (ret)
+ goto err;
+
+ return ret;
+ err:
+ printk(KERN_INFO "[%s] error\n", __func__);
+ return ret;
+}
+
+static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *rw)
+{
+ int ret = 0;
+ int i;
+ void *pre_req = test->card->host->ops->pre_req;
+ void *post_req = test->card->host->ops->post_req;
+
+ if (rw->do_nonblock_req &&
+ ((!pre_req && post_req) || (pre_req && !post_req))) {
+ printk(KERN_INFO "error: only one of pre/post is defined\n");
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < rw->len && ret == 0; i++) {
+ ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *rw)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0 ; i < rw->len && ret == 0; i++) {
+ ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
+ rw->sg_len[i]);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Multiple blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = true,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+};
+
+/*
+ * Multiple non-blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = true,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = false,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = false,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = true,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+};
+
+/*
+ * Multiple non-blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = true,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = false,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = false,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -2221,6 +2595,61 @@ static const struct mmc_test_case mmc_test_cases[] = {
.cleanup = mmc_test_area_cleanup,
},
+ {
+ .name = "Write performance with blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_write_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance with non-blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_write_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance with blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_read_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance with non-blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_read_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_wr_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance non-blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_wr_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_r_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance non-blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_r_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
};
static DEFINE_MUTEX(mmc_test_lock);
@@ -2445,6 +2874,32 @@ static const struct file_operations mmc_test_fops_test = {
.release = single_release,
};
+static int mtf_testlist_show(struct seq_file *sf, void *data)
+{
+ int i;
+
+ mutex_lock(&mmc_test_lock);
+
+ for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
+ seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
+
+ mutex_unlock(&mmc_test_lock);
+
+ return 0;
+}
+
+static int mtf_testlist_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtf_testlist_show, inode->i_private);
+}
+
+static const struct file_operations mmc_test_fops_testlist = {
+ .open = mtf_testlist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void mmc_test_free_file_test(struct mmc_card *card)
{
struct mmc_test_dbgfs_file *df, *dfs;
@@ -2476,7 +2931,18 @@ static int mmc_test_register_file_test(struct mmc_card *card)
if (IS_ERR_OR_NULL(file)) {
dev_err(&card->dev,
- "Can't create file. Perhaps debugfs is disabled.\n");
+ "Can't create test. Perhaps debugfs is disabled.\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (card->debugfs_root)
+ file = debugfs_create_file("testlist", S_IRUGO,
+ card->debugfs_root, card, &mmc_test_fops_testlist);
+
+ if (IS_ERR_OR_NULL(file)) {
+ dev_err(&card->dev,
+ "Can't create testlist. Perhaps debugfs is disabled.\n");
ret = -ENODEV;
goto err;
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6413afa318d..45fb362e3f0 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -52,14 +52,18 @@ static int mmc_queue_thread(void *d)
down(&mq->thread_sem);
do {
struct request *req = NULL;
+ struct mmc_queue_req *tmp;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
req = blk_fetch_request(q);
- mq->req = req;
+ mq->mqrq_cur->req = req;
spin_unlock_irq(q->queue_lock);
- if (!req) {
+ if (req || mq->mqrq_prev->req) {
+ set_current_state(TASK_RUNNING);
+ mq->issue_fn(mq, req);
+ } else {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
break;
@@ -67,11 +71,14 @@ static int mmc_queue_thread(void *d)
up(&mq->thread_sem);
schedule();
down(&mq->thread_sem);
- continue;
}
- set_current_state(TASK_RUNNING);
- mq->issue_fn(mq, req);
+ /* Current request becomes previous request and vice versa. */
+ mq->mqrq_prev->brq.mrq.data = NULL;
+ mq->mqrq_prev->req = NULL;
+ tmp = mq->mqrq_prev;
+ mq->mqrq_prev = mq->mqrq_cur;
+ mq->mqrq_cur = tmp;
} while (1);
up(&mq->thread_sem);
@@ -97,10 +104,46 @@ static void mmc_request(struct request_queue *q)
return;
}
- if (!mq->req)
+ if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
wake_up_process(mq->thread);
}
+struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+ struct scatterlist *sg;
+
+ sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+ if (!sg)
+ *err = -ENOMEM;
+ else {
+ *err = 0;
+ sg_init_table(sg, sg_len);
+ }
+
+ return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned max_discard;
+
+ max_discard = mmc_calc_max_discard(card);
+ if (!max_discard)
+ return;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ q->limits.max_discard_sectors = max_discard;
+ if (card->erased_byte == 0)
+ q->limits.discard_zeroes_data = 1;
+ q->limits.discard_granularity = card->pref_erase << 9;
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ q->limits.discard_granularity = 0;
+ if (mmc_can_secure_erase_trim(card))
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
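
mmc_queue_setup_discard() advertises the card's preferred erase size as the discard granularity (in bytes, hence the << 9), but only while it does not exceed the largest single discard mmc_calc_max_discard() reports; otherwise the granularity is cleared to 0 so no granularity constraint is imposed. A small worked example with made-up card parameters:

#include <stdio.h>

int main(void)
{
	/* Hypothetical card parameters, both in 512-byte sectors. */
	unsigned int pref_erase = 8192;		/* preferred erase size: 4 MiB */
	unsigned int max_discard = 65536;	/* as mmc_calc_max_discard() might return */

	unsigned int granularity = pref_erase << 9;	/* bytes */
	if (pref_erase > max_discard)
		granularity = 0;	/* granularity must not exceed max. discard */

	printf("max_discard_sectors=%u discard_granularity=%u bytes\n",
	       max_discard, granularity);
	return 0;
}
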
@@ -116,6 +159,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
int ret;
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = *mmc_dev(host)->dma_mask;
@@ -125,21 +170,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (!mq->queue)
return -ENOMEM;
+ memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
+ memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
+ mq->mqrq_cur = mqrq_cur;
+ mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
- mq->req = NULL;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
- if (mmc_can_erase(card)) {
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
- mq->queue->limits.max_discard_sectors = UINT_MAX;
- if (card->erased_byte == 0)
- mq->queue->limits.discard_zeroes_data = 1;
- mq->queue->limits.discard_granularity = card->pref_erase << 9;
- if (mmc_can_secure_erase_trim(card))
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
- mq->queue);
- }
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
if (host->max_segs == 1) {
@@ -155,53 +195,64 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
bouncesz = host->max_blk_count * 512;
if (bouncesz > 512) {
- mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
- if (!mq->bounce_buf) {
+ mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_cur->bounce_buf) {
+ printk(KERN_WARNING "%s: unable to "
+ "allocate bounce cur buffer\n",
+ mmc_card_name(card));
+ }
+ mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_prev->bounce_buf) {
printk(KERN_WARNING "%s: unable to "
- "allocate bounce buffer\n",
+ "allocate bounce prev buffer\n",
mmc_card_name(card));
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
}
}
- if (mq->bounce_buf) {
+ if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
blk_queue_max_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
- mq->sg = kmalloc(sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!mq->sg) {
- ret = -ENOMEM;
+ mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->sg, 1);
- mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
- bouncesz / 512, GFP_KERNEL);
- if (!mq->bounce_sg) {
- ret = -ENOMEM;
+ mqrq_cur->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->bounce_sg, bouncesz / 512);
}
}
#endif
- if (!mq->bounce_buf) {
+ if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- mq->sg = kmalloc(sizeof(struct scatterlist) *
- host->max_segs, GFP_KERNEL);
- if (!mq->sg) {
- ret = -ENOMEM;
+ mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+
+ mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->sg, host->max_segs);
}
sema_init(&mq->thread_sem, 1);
@@ -216,16 +267,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
return 0;
free_bounce_sg:
- if (mq->bounce_sg)
- kfree(mq->bounce_sg);
- mq->bounce_sg = NULL;
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
cleanup_queue:
- if (mq->sg)
- kfree(mq->sg);
- mq->sg = NULL;
- if (mq->bounce_buf)
- kfree(mq->bounce_buf);
- mq->bounce_buf = NULL;
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
+
blk_cleanup_queue(mq->queue);
return ret;
}
@@ -234,6 +291,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
+ struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+ struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
@@ -247,16 +306,23 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- if (mq->bounce_sg)
- kfree(mq->bounce_sg);
- mq->bounce_sg = NULL;
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
- kfree(mq->sg);
- mq->sg = NULL;
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
- if (mq->bounce_buf)
- kfree(mq->bounce_buf);
- mq->bounce_buf = NULL;
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
mq->card = NULL;
}
@@ -309,27 +375,27 @@ void mmc_queue_resume(struct mmc_queue *mq)
/*
 * Prepare the sg list(s) to be handed off to the host driver
*/
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
int i;
- if (!mq->bounce_buf)
- return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+ if (!mqrq->bounce_buf)
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
- BUG_ON(!mq->bounce_sg);
+ BUG_ON(!mqrq->bounce_sg);
- sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
- mq->bounce_sg_len = sg_len;
+ mqrq->bounce_sg_len = sg_len;
buflen = 0;
- for_each_sg(mq->bounce_sg, sg, sg_len, i)
+ for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
buflen += sg->length;
- sg_init_one(mq->sg, mq->bounce_buf, buflen);
+ sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
return 1;
}
@@ -338,31 +404,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
* If writing, bounce the data to the buffer before the request
* is sent to the host driver
*/
-void mmc_queue_bounce_pre(struct mmc_queue *mq)
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
- if (!mq->bounce_buf)
+ if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mq->req) != WRITE)
+ if (rq_data_dir(mqrq->req) != WRITE)
return;
- sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
- mq->bounce_buf, mq->sg[0].length);
+ sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
* If reading, bounce the data from the buffer after the request
* has been handled by the host driver
*/
-void mmc_queue_bounce_post(struct mmc_queue *mq)
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
- if (!mq->bounce_buf)
+ if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mq->req) != READ)
+ if (rq_data_dir(mqrq->req) != READ)
return;
- sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
- mq->bounce_buf, mq->sg[0].length);
+ sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
}
-
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 6223ef8dc9c..d2a1eb4b9f9 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -4,19 +4,35 @@
struct request;
struct task_struct;
+struct mmc_blk_request {
+ struct mmc_request mrq;
+ struct mmc_command sbc;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+};
+
+struct mmc_queue_req {
+ struct request *req;
+ struct mmc_blk_request brq;
+ struct scatterlist *sg;
+ char *bounce_buf;
+ struct scatterlist *bounce_sg;
+ unsigned int bounce_sg_len;
+ struct mmc_async_req mmc_active;
+};
+
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
- struct request *req;
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
- struct scatterlist *sg;
- char *bounce_buf;
- struct scatterlist *bounce_sg;
- unsigned int bounce_sg_len;
+ struct mmc_queue_req mqrq[2];
+ struct mmc_queue_req *mqrq_cur;
+ struct mmc_queue_req *mqrq_prev;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -25,8 +41,9 @@ extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
-extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
-extern void mmc_queue_bounce_pre(struct mmc_queue *);
-extern void mmc_queue_bounce_post(struct mmc_queue *);
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
+ struct mmc_queue_req *);
+extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
+extern void mmc_queue_bounce_post(struct mmc_queue_req *);
#endif