Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/imxmmc.c     | 69
-rw-r--r--  drivers/mmc/mmc.c        | 55
-rw-r--r--  drivers/mmc/mmc_block.c  | 60
3 files changed, 93 insertions(+), 91 deletions(-)
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 7ca9e95bdf8..fb6565b98f3 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -91,6 +91,8 @@ struct imxmci_host {
int dma_allocated;
unsigned char actual_bus_width;
+
+ int prev_cmd_code;
};
#define IMXMCI_PEND_IRQ_b 0
@@ -248,16 +250,14 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
* partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
* This is required for SCR read at least.
*/
- if (datasz < 64) {
+ if (datasz < 512) {
host->dma_size = datasz;
if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
/* Hack to enable read SCR */
- if(datasz < 16) {
- MMC_NOB = 1;
- MMC_BLK_LEN = 16;
- }
+ MMC_NOB = 1;
+ MMC_BLK_LEN = 512;
} else {
host->dma_dir = DMA_TO_DEVICE;
}
@@ -409,6 +409,9 @@ static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *
spin_unlock_irqrestore(&host->lock, flags);
+ if(req && req->cmd)
+ host->prev_cmd_code = req->cmd->opcode;
+
host->req = NULL;
host->cmd = NULL;
host->data = NULL;
@@ -553,7 +556,6 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
{
int i;
int burst_len;
- int flush_len;
int trans_done = 0;
unsigned int stat = *pstat;
@@ -566,44 +568,43 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
stat);
+ udelay(20); /* required for clocks < 8MHz*/
+
if(host->dma_dir == DMA_FROM_DEVICE) {
imxmci_busy_wait_for_status(host, &stat,
STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE,
- 20, "imxmci_cpu_driven_data read");
+ 50, "imxmci_cpu_driven_data read");
while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
- (host->data_cnt < host->dma_size)) {
- if(burst_len >= host->dma_size - host->data_cnt) {
- flush_len = burst_len;
- burst_len = host->dma_size - host->data_cnt;
- flush_len -= burst_len;
- host->data_cnt = host->dma_size;
- trans_done = 1;
- } else {
- flush_len = 0;
- host->data_cnt += burst_len;
- }
+ (host->data_cnt < 512)) {
+
+ udelay(20); /* required for clocks < 8MHz*/
for(i = burst_len; i>=2 ; i-=2) {
- *(host->data_ptr++) = MMC_BUFFER_ACCESS;
- udelay(20); /* required for clocks < 8MHz*/
+ u16 data;
+ data = MMC_BUFFER_ACCESS;
+ udelay(10); /* required for clocks < 8MHz*/
+ if(host->data_cnt+2 <= host->dma_size) {
+ *(host->data_ptr++) = data;
+ } else {
+ if(host->data_cnt < host->dma_size)
+ *(u8*)(host->data_ptr) = data;
+ }
+ host->data_cnt += 2;
}
- if(i == 1)
- *(u8*)(host->data_ptr) = MMC_BUFFER_ACCESS;
-
stat = MMC_STATUS;
- /* Flush extra bytes from FIFO */
- while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){
- i = MMC_BUFFER_ACCESS;
- stat = MMC_STATUS;
- stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
- }
-
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read burst %d STATUS = 0x%x\n",
- burst_len, stat);
+ dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
+ host->data_cnt, burst_len, stat);
}
+
+ if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
+ trans_done = 1;
+
+ if(host->dma_size & 0x1ff)
+ stat &= ~STATUS_CRC_READ_ERR;
+
} else {
imxmci_busy_wait_for_status(host, &stat,
STATUS_APPL_BUFF_FE,
@@ -692,8 +693,8 @@ static void imxmci_tasklet_fnc(unsigned long data)
what, stat, MMC_INT_MASK);
dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
- dev_err(mmc_dev(host->mmc), "CMD%d, bus %d-bit, dma_size = 0x%x\n",
- host->cmd?host->cmd->opcode:0, 1<<host->actual_bus_width, host->dma_size);
+ dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
+ host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
}
if(!host->present || timeout)
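
The imxmmc.c hunks above rework the CPU-driven read path: instead of the partial-FIFO hack for short transfers, the controller is always programmed for a full 512-byte block (MMC_NOB = 1, MMC_BLK_LEN = 512), the FIFO is drained in 16-bit words, only the first dma_size bytes are stored, and the spurious CRC error from the padding is masked afterwards. Below is a stand-alone C sketch of that clamped copy; fifo_read(), copy_block() and the fake block[] buffer are illustrative stand-ins, not driver code.

/*
 * Stand-alone sketch (not driver code) of the clamped copy used in the
 * reworked read loop: the controller always delivers a full 512-byte block
 * as 16-bit FIFO words, and only the first dma_size bytes are kept.
 * fifo_read() stands in for the MMC_BUFFER_ACCESS register.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static const uint8_t block[512] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* fake card data */

static uint16_t fifo_read(unsigned int word_idx)
{
	uint16_t w;
	memcpy(&w, &block[word_idx * 2], 2);	/* little-endian host assumed */
	return w;
}

/* Copy dma_size bytes out of a full 512-byte transfer, two bytes at a time. */
static void copy_block(uint8_t *dst, unsigned int dma_size)
{
	unsigned int data_cnt = 0;

	while (data_cnt < 512) {
		uint16_t data = fifo_read(data_cnt / 2);

		if (data_cnt + 2 <= dma_size)
			memcpy(dst + data_cnt, &data, 2);	/* whole word fits */
		else if (data_cnt < dma_size)
			dst[data_cnt] = (uint8_t)data;		/* odd trailing byte */
		/* else: discard, like the driver draining the rest of the FIFO */

		data_cnt += 2;
	}
}

int main(void)
{
	uint8_t scr[8];

	copy_block(scr, sizeof(scr));	/* e.g. an 8-byte SCR read */
	printf("%u %u\n", scr[0], scr[7]);
	return 0;
}
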
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 33525bdf2ab..74eaaee66de 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -247,6 +247,55 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
EXPORT_SYMBOL(mmc_wait_for_app_cmd);
+/**
+ * mmc_set_data_timeout - set the timeout for a data command
+ * @data: data phase for command
+ * @card: the MMC card associated with the data transfer
+ * @write: flag to differentiate reads from writes
+ */
+void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
+ int write)
+{
+ unsigned int mult;
+
+ /*
+ * SD cards use a 100 multiplier rather than 10
+ */
+ mult = mmc_card_sd(card) ? 100 : 10;
+
+ /*
+ * Scale up the multiplier (and therefore the timeout) by
+ * the r2w factor for writes.
+ */
+ if (write)
+ mult <<= card->csd.r2w_factor;
+
+ data->timeout_ns = card->csd.tacc_ns * mult;
+ data->timeout_clks = card->csd.tacc_clks * mult;
+
+ /*
+ * SD cards also have an upper limit on the timeout.
+ */
+ if (mmc_card_sd(card)) {
+ unsigned int timeout_us, limit_us;
+
+ timeout_us = data->timeout_ns / 1000;
+ timeout_us += data->timeout_clks * 1000 /
+ (card->host->ios.clock / 1000);
+
+ if (write)
+ limit_us = 250000;
+ else
+ limit_us = 100000;
+
+ if (timeout_us > limit_us) {
+ data->timeout_ns = limit_us * 1000;
+ data->timeout_clks = 0;
+ }
+ }
+}
+EXPORT_SYMBOL(mmc_set_data_timeout);
+
static int mmc_select_card(struct mmc_host *host, struct mmc_card *card);
/**
@@ -908,11 +957,9 @@ static void mmc_read_scrs(struct mmc_host *host)
{
int err;
struct mmc_card *card;
-
struct mmc_request mrq;
struct mmc_command cmd;
struct mmc_data data;
-
struct scatterlist sg;
list_for_each_entry(card, &host->cards, node) {
@@ -947,8 +994,8 @@ static void mmc_read_scrs(struct mmc_host *host)
memset(&data, 0, sizeof(struct mmc_data));
- data.timeout_ns = card->csd.tacc_ns * 10;
- data.timeout_clks = card->csd.tacc_clks * 10;
+ mmc_set_data_timeout(&data, card, 0);
+
data.blksz_bits = 3;
data.blksz = 1 << 3;
data.blocks = 1;
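
The new mmc_set_data_timeout() helper centralises the timeout calculation that mmc_read_scrs() and mmc_block previously open-coded. The stand-alone program below recomputes the same rules for one hypothetical SD card (the tacc_ns, tacc_clks, r2w_factor and 25 MHz clock values are made up for the example): the 100x SD multiplier, the r2w scaling for writes, and the 100 ms / 250 ms SD caps.

#include <stdio.h>

/* Stand-alone recomputation of the timeout rules added above (SD card case). */
int main(void)
{
	/* Hypothetical CSD values and host clock, for illustration only. */
	unsigned int tacc_ns = 1500000;		/* 1.5 ms access time */
	unsigned int tacc_clks = 0;
	unsigned int r2w_factor = 2;		/* writes take 4x longer */
	unsigned int clock_hz = 25000000;	/* 25 MHz bus clock */

	for (int write = 0; write <= 1; write++) {
		unsigned int mult = 100;	/* SD uses 100, MMC would use 10 */
		if (write)
			mult <<= r2w_factor;

		unsigned long long timeout_ns = (unsigned long long)tacc_ns * mult;
		unsigned int timeout_clks = tacc_clks * mult;

		/* SD-only upper limit: 100 ms for reads, 250 ms for writes. */
		unsigned int timeout_us = timeout_ns / 1000 +
			timeout_clks * 1000 / (clock_hz / 1000);
		unsigned int limit_us = write ? 250000 : 100000;

		if (timeout_us > limit_us) {
			timeout_ns = (unsigned long long)limit_us * 1000;
			timeout_clks = 0;
		}

		printf("%s: %llu ns, %u clks\n", write ? "write" : "read",
		       timeout_ns, timeout_clks);
	}
	return 0;
}

In-kernel, the mmc_block.c hunks below replace the hand-rolled calculation with a single mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ) call.
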
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 115cc21094b..a0e0dad1b41 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -30,6 +30,7 @@
#include <linux/mutex.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>
#include <asm/system.h>
@@ -171,8 +172,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.cmd.arg = req->sector << 9;
brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
- brq.data.timeout_ns = card->csd.tacc_ns * 10;
- brq.data.timeout_clks = card->csd.tacc_clks * 10;
brq.data.blksz_bits = md->block_bits;
brq.data.blksz = 1 << md->block_bits;
brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
@@ -180,6 +179,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
+
if (rq_data_dir(req) == READ) {
brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
brq.data.flags |= MMC_DATA_READ;
@@ -187,12 +188,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.cmd.opcode = MMC_WRITE_BLOCK;
brq.data.flags |= MMC_DATA_WRITE;
brq.data.blocks = 1;
-
- /*
- * Scale up the timeout by the r2w factor
- */
- brq.data.timeout_ns <<= card->csd.r2w_factor;
- brq.data.timeout_clks <<= card->csd.r2w_factor;
}
if (brq.data.blocks > 1) {
@@ -324,52 +319,11 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
md->read_only = mmc_blk_readonly(card);
/*
- * Figure out a workable block size. MMC cards have:
- * - two block sizes, one for read and one for write.
- * - may support partial reads and/or writes
- * (allows block sizes smaller than specified)
- */
- md->block_bits = card->csd.read_blkbits;
- if (card->csd.write_blkbits != card->csd.read_blkbits) {
- if (card->csd.write_blkbits < card->csd.read_blkbits &&
- card->csd.read_partial) {
- /*
- * write block size is smaller than read block
- * size, but we support partial reads, so choose
- * the smaller write block size.
- */
- md->block_bits = card->csd.write_blkbits;
- } else if (card->csd.write_blkbits > card->csd.read_blkbits &&
- card->csd.write_partial) {
- /*
- * read block size is smaller than write block
- * size, but we support partial writes. Use read
- * block size.
- */
- } else {
- /*
- * We don't support this configuration for writes.
- */
- printk(KERN_ERR "%s: unable to select block size for "
- "writing (rb%u wb%u rp%u wp%u)\n",
- mmc_card_id(card),
- 1 << card->csd.read_blkbits,
- 1 << card->csd.write_blkbits,
- card->csd.read_partial,
- card->csd.write_partial);
- md->read_only = 1;
- }
- }
-
- /*
- * Refuse to allow block sizes smaller than 512 bytes.
+ * Both SD and MMC specifications state (although a bit
+ * unclearly in the MMC case) that a block size of 512
+ * bytes must always be supported by the card.
*/
- if (md->block_bits < 9) {
- printk(KERN_ERR "%s: unable to support block size %u\n",
- mmc_card_id(card), 1 << md->block_bits);
- ret = -EINVAL;
- goto err_kfree;
- }
+ md->block_bits = 9;
md->disk = alloc_disk(1 << MMC_SHIFT);
if (md->disk == NULL) {
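
With the block-size negotiation removed, mmc_block always uses 512-byte blocks (md->block_bits = 9), which both the SD and MMC specifications require cards to support. The small illustration below shows how the fixed block size feeds the request fields set earlier in mmc_blk_issue_rq(); the sector numbers are made-up example values and the variables are local stand-ins for the req/brq fields.

/*
 * Stand-alone illustration (not driver code) of how the fixed 512-byte block
 * size propagates into a request: with block_bits = 9, the byte address and
 * block count fall straight out of the 512-byte sector numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int block_bits = 9;			/* always 512 bytes now */
	unsigned long long req_sector = 2048;		/* hypothetical request */
	unsigned int req_nr_sectors = 8;

	unsigned int blksz = 1u << block_bits;			  /* 512 */
	unsigned long long arg = req_sector << 9;		  /* byte address in cmd.arg */
	unsigned int blocks = req_nr_sectors >> (block_bits - 9); /* 8 blocks */

	printf("blksz=%u arg=%llu blocks=%u\n", blksz, arg, blocks);
	return 0;
}
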