path: root/drivers/mmc/mmc_block.c
Diffstat (limited to 'drivers/mmc/mmc_block.c')
-rw-r--r--  drivers/mmc/mmc_block.c | 191
1 file changed, 149 insertions(+), 42 deletions(-)
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index a0e0dad1b41..87713572293 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -28,10 +28,12 @@
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
+#include <linux/scatterlist.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>
+#include <linux/mmc/host.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -81,7 +83,6 @@ static void mmc_blk_put(struct mmc_blk_data *md)
md->usage--;
if (md->usage == 0) {
put_disk(md->disk);
- mmc_cleanup_queue(&md->queue);
kfree(md);
}
mutex_unlock(&open_lock);
@@ -153,18 +154,84 @@ static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
return stat;
}
+static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+{
+ int err;
+ u32 blocks;
+
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ unsigned int timeout_us;
+
+ struct scatterlist sg;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_APP_CMD;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD))
+ return (u32)-1;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ data.timeout_ns = card->csd.tacc_ns * 100;
+ data.timeout_clks = card->csd.tacc_clks * 100;
+
+ timeout_us = data.timeout_ns / 1000;
+ timeout_us += data.timeout_clks * 1000 /
+ (card->host->ios.clock / 1000);
+
+ if (timeout_us > 100000) {
+ data.timeout_ns = 100000000;
+ data.timeout_clks = 0;
+ }
+
+ data.blksz = 4;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ sg_init_one(&sg, &blocks, 4);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE)
+ return (u32)-1;
+
+ blocks = ntohl(blocks);
+
+ return blocks;
+}
+
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
- int ret;
+ struct mmc_blk_request brq;
+ int ret = 1;
if (mmc_card_claim_host(card))
- goto cmd_err;
+ goto flush_queue;
do {
- struct mmc_blk_request brq;
struct mmc_command cmd;
+ u32 readcmd, writecmd;
memset(&brq, 0, sizeof(struct mmc_blk_request));
brq.mrq.cmd = &brq.cmd;
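The mmc_sd_num_wr_blocks() helper added in the hunk above sends MMC_APP_CMD (CMD55) and then ACMD22 (SD_APP_SEND_NUM_WR_BLKS) as a 4-byte read whose payload is the big-endian count of well-written blocks. Its data timeout is derived from the CSD access time, scaled by 100 and capped at 100 ms. A minimal standalone sketch of just that timeout arithmetic, assuming made-up CSD and clock values:

/*
 * Standalone sketch of the timeout calculation in mmc_sd_num_wr_blocks(),
 * with hypothetical CSD access times and bus clock.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tacc_ns   = 1500000;	/* hypothetical csd.tacc_ns   */
	unsigned int tacc_clks = 1000;		/* hypothetical csd.tacc_clks */
	unsigned int clock_hz  = 25000000;	/* hypothetical ios.clock     */

	/* Same scaling as the patch: 100 times the nominal access time. */
	unsigned int timeout_ns   = tacc_ns * 100;
	unsigned int timeout_clks = tacc_clks * 100;

	/* Convert both components to microseconds at the current bus clock. */
	unsigned int timeout_us = timeout_ns / 1000;
	timeout_us += timeout_clks * 1000 / (clock_hz / 1000);

	/* Cap the total at 100 ms, exactly as the patch does. */
	if (timeout_us > 100000) {
		timeout_ns = 100000000;
		timeout_clks = 0;
	}

	printf("timeout_us=%u timeout_ns=%u timeout_clks=%u\n",
	       timeout_us, timeout_ns, timeout_clks);
	return 0;
}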
@@ -172,7 +239,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.cmd.arg = req->sector << 9;
brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
- brq.data.blksz_bits = md->block_bits;
brq.data.blksz = 1 << md->block_bits;
brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
brq.stop.opcode = MMC_STOP_TRANSMISSION;
@@ -181,20 +247,34 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
- if (rq_data_dir(req) == READ) {
- brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
- brq.data.flags |= MMC_DATA_READ;
- } else {
- brq.cmd.opcode = MMC_WRITE_BLOCK;
- brq.data.flags |= MMC_DATA_WRITE;
+ /*
+ * If the host doesn't support multiple block writes, force
+ * block writes to single block. SD cards are excepted from
+ * this rule as they support querying the number of
+ * successfully written sectors.
+ */
+ if (rq_data_dir(req) != READ &&
+ !(card->host->caps & MMC_CAP_MULTIWRITE) &&
+ !mmc_card_sd(card))
brq.data.blocks = 1;
- }
if (brq.data.blocks > 1) {
brq.data.flags |= MMC_DATA_MULTI;
brq.mrq.stop = &brq.stop;
+ readcmd = MMC_READ_MULTIPLE_BLOCK;
+ writecmd = MMC_WRITE_MULTIPLE_BLOCK;
} else {
brq.mrq.stop = NULL;
+ readcmd = MMC_READ_SINGLE_BLOCK;
+ writecmd = MMC_WRITE_BLOCK;
+ }
+
+ if (rq_data_dir(req) == READ) {
+ brq.cmd.opcode = readcmd;
+ brq.data.flags |= MMC_DATA_READ;
+ } else {
+ brq.cmd.opcode = writecmd;
+ brq.data.flags |= MMC_DATA_WRITE;
}
brq.data.sg = mq->sg;
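For reference, the opcode selection rewritten above reduces to a small decision: writes drop to a single block when the host lacks MMC_CAP_MULTIWRITE and the card is not SD, and the multi- or single-block variant of the read or write command is then picked from the final block count. A standalone sketch of that decision, assuming simple boolean flags in place of the real card and host structures:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for rq_data_dir(), MMC_CAP_MULTIWRITE and
 * mmc_card_sd(); returns the command name the patch would choose. */
static const char *pick_opcode(bool is_read, unsigned int *blocks,
			       bool host_multiwrite, bool is_sd)
{
	/* Writes fall back to a single block unless the host can do
	 * multi-block writes or the card is SD (which can report how
	 * many blocks actually made it). */
	if (!is_read && !host_multiwrite && !is_sd)
		*blocks = 1;

	if (*blocks > 1)
		return is_read ? "READ_MULTIPLE_BLOCK" : "WRITE_MULTIPLE_BLOCK";
	return is_read ? "READ_SINGLE_BLOCK" : "WRITE_BLOCK";
}

int main(void)
{
	unsigned int blocks = 8;

	printf("%s (blocks=%u)\n",
	       pick_opcode(false, &blocks, false, false), blocks);
	return 0;
}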
@@ -219,27 +299,29 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
goto cmd_err;
}
- do {
- int err;
-
- cmd.opcode = MMC_SEND_STATUS;
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 5);
- if (err) {
- printk(KERN_ERR "%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- goto cmd_err;
- }
- } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
+ if (rq_data_dir(req) != READ) {
+ do {
+ int err;
+
+ cmd.opcode = MMC_SEND_STATUS;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ if (err) {
+ printk(KERN_ERR "%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ goto cmd_err;
+ }
+ } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
#if 0
- if (cmd.resp[0] & ~0x00000900)
- printk(KERN_ERR "%s: status = %08x\n",
- req->rq_disk->disk_name, cmd.resp[0]);
- if (mmc_decode_status(cmd.resp))
- goto cmd_err;
+ if (cmd.resp[0] & ~0x00000900)
+ printk(KERN_ERR "%s: status = %08x\n",
+ req->rq_disk->disk_name, cmd.resp[0]);
+ if (mmc_decode_status(cmd.resp))
+ goto cmd_err;
#endif
+ }
/*
* A block was successfully transferred.
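The hunk above limits the status poll to writes: after a write the card may still be programming its flash, so the driver loops on MMC_SEND_STATUS (CMD13) until the R1 response reports READY_FOR_DATA (bit 8). The disabled debug check masks with ~0x00000900, i.e. it ignores READY_FOR_DATA and the 'tran' current-state bits and would warn about anything else. A minimal standalone illustration of the bit test, assuming a made-up status word:

#include <stdio.h>

#define R1_READY_FOR_DATA	(1 << 8)	/* bit 8 of the R1 card status */

int main(void)
{
	unsigned int resp0 = 0x00000900;	/* hypothetical R1 status word */

	if (resp0 & R1_READY_FOR_DATA)
		printf("card ready for data, stop polling\n");
	else
		printf("card still programming, poll CMD13 again\n");
	return 0;
}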
@@ -262,19 +344,45 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
return 1;
cmd_err:
+ /*
+ * If this is an SD card and we're writing, we can first
+ * mark the known good sectors as ok.
+ *
+ * If the card is not SD, we can still acknowledge written sectors
+ * if the controller can do proper error reporting.
+ *
+ * For reads we just fail the entire chunk as that should
+ * be safe in all cases.
+ */
+ if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
+ u32 blocks;
+ unsigned int bytes;
+
+ blocks = mmc_sd_num_wr_blocks(card);
+ if (blocks != (u32)-1) {
+ if (card->csd.write_partial)
+ bytes = blocks << md->block_bits;
+ else
+ bytes = blocks << 9;
+ spin_lock_irq(&md->lock);
+ ret = end_that_request_chunk(req, 1, bytes);
+ spin_unlock_irq(&md->lock);
+ }
+ } else if (rq_data_dir(req) != READ &&
+ (card->host->caps & MMC_CAP_MULTIWRITE)) {
+ spin_lock_irq(&md->lock);
+ ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ }
+
mmc_card_release_host(card);
- /*
- * This is a little draconian, but until we get proper
- * error handling sorted out here, its the best we can
- * do - especially as some hosts have no idea how much
- * data was transferred before the error occurred.
- */
+flush_queue:
spin_lock_irq(&md->lock);
- do {
+ while (ret) {
ret = end_that_request_chunk(req, 0,
req->current_nr_sectors << 9);
- } while (ret);
+ }
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
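On the error path above, write requests now acknowledge whatever data is known to have reached the card: SD cards are asked directly via mmc_sd_num_wr_blocks(), other cards fall back to bytes_xfered when the host advertises MMC_CAP_MULTIWRITE, and only the remaining sectors are failed. A standalone sketch of the block-to-byte conversion used in the SD case, assuming hypothetical values for the ACMD22 result and the CSD write_partial flag:

#include <stdio.h>

int main(void)
{
	unsigned int blocks = 5;	/* hypothetical ACMD22 result          */
	unsigned int block_bits = 9;	/* hypothetical md->block_bits (512 B) */
	int write_partial = 1;		/* hypothetical csd.write_partial      */
	unsigned int bytes;

	/* Mirror the patch: shift by the configured block size when the
	 * card supports partial writes, otherwise by 9 (512-byte units). */
	if (write_partial)
		bytes = blocks << block_bits;
	else
		bytes = blocks << 9;

	printf("%u good blocks -> complete %u bytes, fail the rest\n",
	       blocks, bytes);
	return 0;
}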
@@ -443,12 +551,11 @@ static void mmc_blk_remove(struct mmc_card *card)
if (md) {
int devidx;
+ /* Stop new requests from getting into the queue */
del_gendisk(md->disk);
- /*
- * I think this is needed.
- */
- md->disk->queue = NULL;
+ /* Then flush out any already in there */
+ mmc_cleanup_queue(&md->queue);
devidx = md->disk->first_minor >> MMC_SHIFT;
__clear_bit(devidx, dev_use);