Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--   drivers/mmc/card/block.c    |  17
-rw-r--r--   drivers/mmc/card/mmc_test.c | 140
-rw-r--r--   drivers/mmc/card/queue.c    |  97
3 files changed, 181 insertions, 73 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 66e5a5487c2..86dbb366415 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -213,7 +213,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
         struct mmc_blk_data *md = mq->data;
         struct mmc_card *card = md->queue.card;
         struct mmc_blk_request brq;
-        int ret = 1, sg_pos, data_size;
+        int ret = 1, data_size, i;
+        struct scatterlist *sg;
 
         mmc_claim_host(card->host);
 
@@ -267,18 +268,22 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
                 mmc_queue_bounce_pre(mq);
 
+                /*
+                 * Adjust the sg list so it is the same size as the
+                 * request.
+                 */
                 if (brq.data.blocks !=
                     (req->nr_sectors >> (md->block_bits - 9))) {
                         data_size = brq.data.blocks * brq.data.blksz;
-                        for (sg_pos = 0; sg_pos < brq.data.sg_len; sg_pos++) {
-                                data_size -= mq->sg[sg_pos].length;
+                        for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
+                                data_size -= sg->length;
                                 if (data_size <= 0) {
-                                        mq->sg[sg_pos].length += data_size;
-                                        sg_pos++;
+                                        sg->length += data_size;
+                                        i++;
                                         break;
                                 }
                         }
-                        brq.data.sg_len = sg_pos;
+                        brq.data.sg_len = i;
                 }
 
                 mmc_wait_for_req(card->host, &brq.mrq);
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index d6b9b486417..f26b01d811a 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -21,13 +21,17 @@
 #define RESULT_UNSUP_HOST       2
 #define RESULT_UNSUP_CARD       3
 
-#define BUFFER_SIZE     (PAGE_SIZE * 4)
+#define BUFFER_ORDER    2
+#define BUFFER_SIZE     (PAGE_SIZE << BUFFER_ORDER)
 
 struct mmc_test_card {
         struct mmc_card *card;
 
         u8              scratch[BUFFER_SIZE];
         u8              *buffer;
+#ifdef CONFIG_HIGHMEM
+        struct page     *highmem;
+#endif
 };
 
 /*******************************************************************/
@@ -799,6 +803,94 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
         return 0;
 }
 
+#ifdef CONFIG_HIGHMEM
+
+static int mmc_test_write_high(struct mmc_test_card *test)
+{
+        int ret;
+        struct scatterlist sg;
+
+        sg_init_table(&sg, 1);
+        sg_set_page(&sg, test->highmem, 512, 0);
+
+        ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+        if (ret)
+                return ret;
+
+        return 0;
+}
+
+static int mmc_test_read_high(struct mmc_test_card *test)
+{
+        int ret;
+        struct scatterlist sg;
+
+        sg_init_table(&sg, 1);
+        sg_set_page(&sg, test->highmem, 512, 0);
+
+        ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+        if (ret)
+                return ret;
+
+        return 0;
+}
+
+static int mmc_test_multi_write_high(struct mmc_test_card *test)
+{
+        int ret;
+        unsigned int size;
+        struct scatterlist sg;
+
+        if (test->card->host->max_blk_count == 1)
+                return RESULT_UNSUP_HOST;
+
+        size = PAGE_SIZE * 2;
+        size = min(size, test->card->host->max_req_size);
+        size = min(size, test->card->host->max_seg_size);
+        size = min(size, test->card->host->max_blk_count * 512);
+
+        if (size < 1024)
+                return RESULT_UNSUP_HOST;
+
+        sg_init_table(&sg, 1);
+        sg_set_page(&sg, test->highmem, size, 0);
+
+        ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+        if (ret)
+                return ret;
+
+        return 0;
+}
+
+static int mmc_test_multi_read_high(struct mmc_test_card *test)
+{
+        int ret;
+        unsigned int size;
+        struct scatterlist sg;
+
+        if (test->card->host->max_blk_count == 1)
+                return RESULT_UNSUP_HOST;
+
+        size = PAGE_SIZE * 2;
+        size = min(size, test->card->host->max_req_size);
+        size = min(size, test->card->host->max_seg_size);
+        size = min(size, test->card->host->max_blk_count * 512);
+
+        if (size < 1024)
+                return RESULT_UNSUP_HOST;
+
+        sg_init_table(&sg, 1);
+        sg_set_page(&sg, test->highmem, size, 0);
+
+        ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+        if (ret)
+                return ret;
+
+        return 0;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
 static const struct mmc_test_case mmc_test_cases[] = {
         {
                 .name = "Basic write (no data verification)",
@@ -913,6 +1005,39 @@ static const struct mmc_test_case mmc_test_cases[] = {
                 .name = "Correct xfer_size at read (midway failure)",
                 .run = mmc_test_multi_xfersize_read,
         },
+
+#ifdef CONFIG_HIGHMEM
+
+        {
+                .name = "Highmem write",
+                .prepare = mmc_test_prepare_write,
+                .run = mmc_test_write_high,
+                .cleanup = mmc_test_cleanup,
+        },
+
+        {
+                .name = "Highmem read",
+                .prepare = mmc_test_prepare_read,
+                .run = mmc_test_read_high,
+                .cleanup = mmc_test_cleanup,
+        },
+
+        {
+                .name = "Multi-block highmem write",
+                .prepare = mmc_test_prepare_write,
+                .run = mmc_test_multi_write_high,
+                .cleanup = mmc_test_cleanup,
+        },
+
+        {
+                .name = "Multi-block highmem read",
+                .prepare = mmc_test_prepare_read,
+                .run = mmc_test_multi_read_high,
+                .cleanup = mmc_test_cleanup,
+        },
+
+#endif /* CONFIG_HIGHMEM */
+
 };
 
 static struct mutex mmc_test_lock;
@@ -1014,12 +1139,23 @@ static ssize_t mmc_test_store(struct device *dev,
         test->card = card;
 
         test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
+#ifdef CONFIG_HIGHMEM
+        test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+        if (test->buffer && test->highmem) {
+#else
         if (test->buffer) {
+#endif
                 mutex_lock(&mmc_test_lock);
                 mmc_test_run(test, testcase);
                 mutex_unlock(&mmc_test_lock);
         }
 
+#ifdef CONFIG_HIGHMEM
+        __free_pages(test->highmem, BUFFER_ORDER);
+#endif
         kfree(test->buffer);
         kfree(test);
 
@@ -1041,6 +1177,8 @@ static int mmc_test_probe(struct mmc_card *card)
         if (ret)
                 return ret;
 
+        dev_info(&card->dev, "Card claimed for testing.\n");
+
         return 0;
 }
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7731ddefdc1..3dee97e7d16 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                         printk(KERN_WARNING "%s: unable to allocate "
                                 "bounce buffer\n", mmc_card_name(card));
                 } else {
-                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
+                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                         blk_queue_max_sectors(mq->queue, bouncesz / 512);
                         blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
                         blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
@@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
         }
 }
 
-static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
-        struct scatterlist *src, unsigned int src_len)
-{
-        unsigned int chunk;
-        char *dst_buf, *src_buf;
-        unsigned int dst_size, src_size;
-
-        dst_buf = NULL;
-        src_buf = NULL;
-        dst_size = 0;
-        src_size = 0;
-
-        while (src_len) {
-                BUG_ON(dst_len == 0);
-
-                if (dst_size == 0) {
-                        dst_buf = sg_virt(dst);
-                        dst_size = dst->length;
-                }
-
-                if (src_size == 0) {
-                        src_buf = sg_virt(src);
-                        src_size = src->length;
-                }
-
-                chunk = min(dst_size, src_size);
-
-                memcpy(dst_buf, src_buf, chunk);
-
-                dst_buf += chunk;
-                src_buf += chunk;
-                dst_size -= chunk;
-                src_size -= chunk;
-
-                if (dst_size == 0) {
-                        dst++;
-                        dst_len--;
-                }
-
-                if (src_size == 0) {
-                        src++;
-                        src_len--;
-                }
-        }
-}
-
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
 {
         unsigned int sg_len;
+        size_t buflen;
+        struct scatterlist *sg;
+        int i;
 
         if (!mq->bounce_buf)
                 return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
 
         mq->bounce_sg_len = sg_len;
 
-        /*
-         * Shortcut in the event we only get a single entry.
-         */
-        if (sg_len == 1) {
-                memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
-                return 1;
-        }
+        buflen = 0;
+        for_each_sg(mq->bounce_sg, sg, sg_len, i)
+                buflen += sg->length;
 
-        sg_init_one(mq->sg, mq->bounce_buf, 0);
-
-        while (sg_len) {
-                mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
-                sg_len--;
-        }
+        sg_init_one(mq->sg, mq->bounce_buf, buflen);
 
         return 1;
 }
 
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
 void mmc_queue_bounce_pre(struct mmc_queue *mq)
 {
+        unsigned long flags;
+
         if (!mq->bounce_buf)
                 return;
 
-        if (mq->bounce_sg_len == 1)
-                return;
         if (rq_data_dir(mq->req) != WRITE)
                 return;
 
-        copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
+        local_irq_save(flags);
+        sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
+                mq->bounce_buf, mq->sg[0].length);
+        local_irq_restore(flags);
 }
 
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
 void mmc_queue_bounce_post(struct mmc_queue *mq)
 {
+        unsigned long flags;
+
         if (!mq->bounce_buf)
                 return;
 
-        if (mq->bounce_sg_len == 1)
-                return;
         if (rq_data_dir(mq->req) != READ)
                 return;
 
-        copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
+        local_irq_save(flags);
+        sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
+                mq->bounce_buf, mq->sg[0].length);
+        local_irq_restore(flags);
 }
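For reference, here is a minimal sketch (not part of the patch) of the bounce-copy idiom the reworked mmc_queue_map_sg()/mmc_queue_bounce_pre() rely on: total the scatterlist length with for_each_sg(), then move the data through a contiguous bounce buffer with sg_copy_to_buffer(). Because that helper maps each sg entry itself, the scatterlist may contain highmem pages, which is why the BLK_BOUNCE_HIGH limit is dropped above. The function name and parameters below are illustrative only.

#include <linux/scatterlist.h>

/*
 * Illustrative sketch: copy an entire scatterlist into a preallocated,
 * physically contiguous bounce buffer, mirroring what
 * mmc_queue_bounce_pre() now does for write requests.
 */
static size_t example_bounce_to_buffer(struct scatterlist *sgl,
                                       unsigned int sg_len,
                                       void *bounce_buf)
{
        struct scatterlist *sg;
        size_t buflen = 0;
        int i;

        /* Total up the request size, as mmc_queue_map_sg() does. */
        for_each_sg(sgl, sg, sg_len, i)
                buflen += sg->length;

        /*
         * sg_copy_to_buffer() maps each entry as it goes, so highmem
         * pages are handled without sg_virt().
         */
        return sg_copy_to_buffer(sgl, sg_len, bounce_buf, buflen);
}

The patch additionally wraps the copies in local_irq_save()/local_irq_restore(), since the sg copy helpers use atomic kmaps internally; the sketch omits that detail.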