Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/block.c    |  92
-rw-r--r--  drivers/mmc/card/mmc_test.c | 811
-rw-r--r--  drivers/mmc/card/queue.c    |  20
3 files changed, 912 insertions, 11 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 8433cde29c8..00073b7c036 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -29,7 +29,6 @@
 #include <linux/kdev_t.h>
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
-#include <linux/smp_lock.h>
 #include <linux/scatterlist.h>
 #include <linux/string_helpers.h>
 
@@ -51,6 +50,7 @@ MODULE_ALIAS("mmc:block");
 #define MMC_SHIFT       3
 #define MMC_NUM_MINORS  (256 >> MMC_SHIFT)
 
+static DEFINE_MUTEX(block_mutex);
 static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);
 
 /*
@@ -108,7 +108,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;
 
-       lock_kernel();
+       mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
@@ -119,7 +119,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
                        ret = -EROFS;
                }
        }
-       unlock_kernel();
+       mutex_unlock(&block_mutex);
 
        return ret;
 }
@@ -128,9 +128,9 @@ static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
 {
        struct mmc_blk_data *md = disk->private_data;
 
-       lock_kernel();
+       mutex_lock(&block_mutex);
        mmc_blk_put(md);
-       unlock_kernel();
+       mutex_unlock(&block_mutex);
        return 0;
 }
 
@@ -247,7 +247,76 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
        return cmd.resp[0];
 }
 
-static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+{
+       struct mmc_blk_data *md = mq->data;
+       struct mmc_card *card = md->queue.card;
+       unsigned int from, nr, arg;
+       int err = 0;
+
+       mmc_claim_host(card->host);
+
+       if (!mmc_can_erase(card)) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       from = blk_rq_pos(req);
+       nr = blk_rq_sectors(req);
+
+       if (mmc_can_trim(card))
+               arg = MMC_TRIM_ARG;
+       else
+               arg = MMC_ERASE_ARG;
+
+       err = mmc_erase(card, from, nr, arg);
+out:
+       spin_lock_irq(&md->lock);
+       __blk_end_request(req, err, blk_rq_bytes(req));
+       spin_unlock_irq(&md->lock);
+
+       mmc_release_host(card->host);
+
+       return err ? 0 : 1;
+}
+
+static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+                                      struct request *req)
+{
+       struct mmc_blk_data *md = mq->data;
+       struct mmc_card *card = md->queue.card;
+       unsigned int from, nr, arg;
+       int err = 0;
+
+       mmc_claim_host(card->host);
+
+       if (!mmc_can_secure_erase_trim(card)) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       from = blk_rq_pos(req);
+       nr = blk_rq_sectors(req);
+
+       if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+               arg = MMC_SECURE_TRIM1_ARG;
+       else
+               arg = MMC_SECURE_ERASE_ARG;
+
+       err = mmc_erase(card, from, nr, arg);
+       if (!err && arg == MMC_SECURE_TRIM1_ARG)
+               err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+out:
+       spin_lock_irq(&md->lock);
+       __blk_end_request(req, err, blk_rq_bytes(req));
+       spin_unlock_irq(&md->lock);
+
+       mmc_release_host(card->host);
+
+       return err ? 0 : 1;
+}
+
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
@@ -475,6 +544,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        return 0;
 }
 
+static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+       if (req->cmd_flags & REQ_DISCARD) {
+               if (req->cmd_flags & REQ_SECURE)
+                       return mmc_blk_issue_secdiscard_rq(mq, req);
+               else
+                       return mmc_blk_issue_discard_rq(mq, req);
+       } else {
+               return mmc_blk_issue_rw_rq(mq, req);
+       }
+}
 
 static inline int mmc_blk_readonly(struct mmc_card *card)
 {
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 445d7db2277..5dd8576b5c1 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 
 #include <linux/scatterlist.h>
+#include <linux/swap.h>         /* For nr_free_buffer_pages() */
 
 #define RESULT_OK               0
 #define RESULT_FAIL             1
@@ -25,6 +26,60 @@
 #define BUFFER_ORDER    2
 #define BUFFER_SIZE     (PAGE_SIZE << BUFFER_ORDER)
 
+/*
+ * Limit the test area size to the maximum MMC HC erase group size.  Note that
+ * the maximum SD allocation unit size is just 4MiB.
+ */
+#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
+
+/**
+ * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
+ * @page: first page in the allocation
+ * @order: order of the number of pages allocated
+ */
+struct mmc_test_pages {
+       struct page *page;
+       unsigned int order;
+};
+
+/**
+ * struct mmc_test_mem - allocated memory.
+ * @arr: array of allocations
+ * @cnt: number of allocations
+ */
+struct mmc_test_mem {
+       struct mmc_test_pages *arr;
+       unsigned int cnt;
+};
+
+/**
+ * struct mmc_test_area - information for performance tests.
+ * @max_sz: test area size (in bytes)
+ * @dev_addr: address on card at which to do performance tests
+ * @max_segs: maximum segments in scatterlist @sg
+ * @blocks: number of (512 byte) blocks currently mapped by @sg
+ * @sg_len: length of currently mapped scatterlist @sg
+ * @mem: allocated memory
+ * @sg: scatterlist
+ */
+struct mmc_test_area {
+       unsigned long max_sz;
+       unsigned int dev_addr;
+       unsigned int max_segs;
+       unsigned int blocks;
+       unsigned int sg_len;
+       struct mmc_test_mem *mem;
+       struct scatterlist *sg;
+};
+
+/**
+ * struct mmc_test_card - test information.
+ * @card: card under test
+ * @scratch: transfer buffer
+ * @buffer: transfer buffer
+ * @highmem: buffer for highmem tests
+ * @area: information for performance tests
+ */
 struct mmc_test_card {
        struct mmc_card *card;
 
@@ -33,6 +88,7 @@ struct mmc_test_card {
 #ifdef CONFIG_HIGHMEM
        struct page *highmem;
 #endif
+       struct mmc_test_area area;
 };
 
 /*******************************************************************/
@@ -97,6 +153,12 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
        mmc_set_data_timeout(mrq->data, test->card);
 }
 
+static int mmc_test_busy(struct mmc_command *cmd)
+{
+       return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
+               (R1_CURRENT_STATE(cmd->resp[0]) == 7);
+}
+
 /*
  * Wait for the card to finish the busy state
  */
@@ -117,13 +179,13 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
                if (ret)
                        break;
 
-               if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
+               if (!busy && mmc_test_busy(&cmd)) {
                        busy = 1;
                        printk(KERN_INFO "%s: Warning: Host did not "
                                "wait for busy state to end.\n",
                                mmc_hostname(test->card->host));
                }
-       } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
+       } while (mmc_test_busy(&cmd));
 
        return ret;
 }
@@ -170,6 +232,248 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
        return 0;
 }
 
+static void mmc_test_free_mem(struct mmc_test_mem *mem)
+{
+       if (!mem)
+               return;
+       while (mem->cnt--)
+               __free_pages(mem->arr[mem->cnt].page,
+                            mem->arr[mem->cnt].order);
+       kfree(mem->arr);
+       kfree(mem);
+}
+
+/*
+ * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
+ * there isn't much memory do not exceed 1/16th total lowmem pages.
+ */
+static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
+                                              unsigned long max_sz)
+{
+       unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
+       unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+       unsigned long page_cnt = 0;
+       unsigned long limit = nr_free_buffer_pages() >> 4;
+       struct mmc_test_mem *mem;
+
+       if (max_page_cnt > limit)
+               max_page_cnt = limit;
+       if (max_page_cnt < min_page_cnt)
+               max_page_cnt = min_page_cnt;
+
+       mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
+       if (!mem)
+               return NULL;
+
+       mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt,
+                          GFP_KERNEL);
+       if (!mem->arr)
+               goto out_free;
+
+       while (max_page_cnt) {
+               struct page *page;
+               unsigned int order;
+               gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
+                               __GFP_NORETRY;
+
+               order = get_order(max_page_cnt << PAGE_SHIFT);
+               while (1) {
+                       page = alloc_pages(flags, order);
+                       if (page || !order)
+                               break;
+                       order -= 1;
+               }
+               if (!page) {
+                       if (page_cnt < min_page_cnt)
+                               goto out_free;
+                       break;
+               }
+               mem->arr[mem->cnt].page = page;
+               mem->arr[mem->cnt].order = order;
+               mem->cnt += 1;
+               if (max_page_cnt <= (1UL << order))
+                       break;
+               max_page_cnt -= 1UL << order;
+               page_cnt += 1UL << order;
+       }
+
+       return mem;
+
+out_free:
+       mmc_test_free_mem(mem);
+       return NULL;
+}
+
+/*
+ * Map memory into a scatterlist.  Optionally allow the same memory to be
+ * mapped more than once.
+ */
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
+                          struct scatterlist *sglist, int repeat,
+                          unsigned int max_segs, unsigned int *sg_len)
+{
+       struct scatterlist *sg = NULL;
+       unsigned int i;
+
+       sg_init_table(sglist, max_segs);
+
+       *sg_len = 0;
+       do {
+               for (i = 0; i < mem->cnt; i++) {
+                       unsigned long len = PAGE_SIZE << mem->arr[i].order;
+
+                       if (sz < len)
+                               len = sz;
+                       if (sg)
+                               sg = sg_next(sg);
+                       else
+                               sg = sglist;
+                       if (!sg)
+                               return -EINVAL;
+                       sg_set_page(sg, mem->arr[i].page, len, 0);
+                       sz -= len;
+                       *sg_len += 1;
+                       if (!sz)
+                               break;
+               }
+       } while (sz && repeat);
+
+       if (sz)
+               return -EINVAL;
+
+       if (sg)
+               sg_mark_end(sg);
+
+       return 0;
+}
+
+/*
+ * Map memory into a scatterlist so that no pages are contiguous.  Allow the
+ * same memory to be mapped more than once.
+ */
+static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
+                                      unsigned long sz,
+                                      struct scatterlist *sglist,
+                                      unsigned int max_segs,
+                                      unsigned int *sg_len)
+{
+       struct scatterlist *sg = NULL;
+       unsigned int i = mem->cnt, cnt;
+       unsigned long len;
+       void *base, *addr, *last_addr = NULL;
+
+       sg_init_table(sglist, max_segs);
+
+       *sg_len = 0;
+       while (sz && i) {
+               base = page_address(mem->arr[--i].page);
+               cnt = 1 << mem->arr[i].order;
+               while (sz && cnt) {
+                       addr = base + PAGE_SIZE * --cnt;
+                       if (last_addr && last_addr + PAGE_SIZE == addr)
+                               continue;
+                       last_addr = addr;
+                       len = PAGE_SIZE;
+                       if (sz < len)
+                               len = sz;
+                       if (sg)
+                               sg = sg_next(sg);
+                       else
+                               sg = sglist;
+                       if (!sg)
+                               return -EINVAL;
+                       sg_set_page(sg, virt_to_page(addr), len, 0);
+                       sz -= len;
+                       *sg_len += 1;
+               }
+       }
+
+       if (sg)
+               sg_mark_end(sg);
+
+       return 0;
+}
+
+/*
+ * Calculate transfer rate in bytes per second.
+ */
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+{
+       uint64_t ns;
+
+       ns = ts->tv_sec;
+       ns *= 1000000000;
+       ns += ts->tv_nsec;
+
+       bytes *= 1000000000;
+
+       while (ns > UINT_MAX) {
+               bytes >>= 1;
+               ns >>= 1;
+       }
+
+       if (!ns)
+               return 0;
+
+       do_div(bytes, (uint32_t)ns);
+
+       return bytes;
+}
+
+/*
+ * Print the transfer rate.
+ */
+static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
+                               struct timespec *ts1, struct timespec *ts2)
+{
+       unsigned int rate, sectors = bytes >> 9;
+       struct timespec ts;
+
+       ts = timespec_sub(*ts2, *ts1);
+
+       rate = mmc_test_rate(bytes, &ts);
+
+       printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+                        "seconds (%u kB/s, %u KiB/s)\n",
+                        mmc_hostname(test->card->host), sectors, sectors >> 1,
+                        (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
+                        (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+}
+
+/*
+ * Print the average transfer rate.
+ */
+static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
+                                   unsigned int count, struct timespec *ts1,
+                                   struct timespec *ts2)
+{
+       unsigned int rate, sectors = bytes >> 9;
+       uint64_t tot = bytes * count;
+       struct timespec ts;
+
+       ts = timespec_sub(*ts2, *ts1);
+
+       rate = mmc_test_rate(tot, &ts);
+
+       printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
+                        "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
+                        mmc_hostname(test->card->host), count, sectors, count,
+                        sectors >> 1, (sectors & 1 ? ".5" : ""),
+                        (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+                        rate / 1000, rate / 1024);
+}
+
+/*
+ * Return the card size in sectors.
+ */
+static unsigned int mmc_test_capacity(struct mmc_card *card)
+{
+       if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
+               return card->ext_csd.sectors;
+       else
+               return card->csd.capacity << (card->csd.read_blkbits - 9);
+}
+
 /*******************************************************************/
 /*  Test preparation and cleanup                                   */
 /*******************************************************************/
 
@@ -893,8 +1197,419 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)
        return 0;
 }
 
+#else
+
+static int mmc_test_no_highmem(struct mmc_test_card *test)
+{
+       printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
+              mmc_hostname(test->card->host));
+       return 0;
+}
+
 #endif /* CONFIG_HIGHMEM */
 
+/*
+ * Map sz bytes so that it can be transferred.
+ */
+static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
+                            int max_scatter)
+{
+       struct mmc_test_area *t = &test->area;
+
+       t->blocks = sz >> 9;
+
+       if (max_scatter) {
+               return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
+                                                  t->max_segs, &t->sg_len);
+       } else {
+               return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+                                      &t->sg_len);
+       }
+}
+
+/*
+ * Transfer bytes mapped by mmc_test_area_map().
+ */
+static int mmc_test_area_transfer(struct mmc_test_card *test,
+                                 unsigned int dev_addr, int write)
+{
+       struct mmc_test_area *t = &test->area;
+
+       return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
+                                       t->blocks, 512, write);
+}
+
+/*
+ * Map and transfer bytes.
+ */
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+                           unsigned int dev_addr, int write, int max_scatter,
+                           int timed)
+{
+       struct timespec ts1, ts2;
+       int ret;
+
+       ret = mmc_test_area_map(test, sz, max_scatter);
+       if (ret)
+               return ret;
+
+       if (timed)
+               getnstimeofday(&ts1);
+
+       ret = mmc_test_area_transfer(test, dev_addr, write);
+       if (ret)
+               return ret;
+
+       if (timed)
+               getnstimeofday(&ts2);
+
+       if (timed)
+               mmc_test_print_rate(test, sz, &ts1, &ts2);
+
+       return 0;
+}
+
+/*
+ * Write the test area entirely.
+ */
+static int mmc_test_area_fill(struct mmc_test_card *test)
+{
+       return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+                               1, 0, 0);
+}
+
+/*
+ * Erase the test area entirely.
+ */
+static int mmc_test_area_erase(struct mmc_test_card *test)
+{
+       struct mmc_test_area *t = &test->area;
+
+       if (!mmc_can_erase(test->card))
+               return 0;
+
+       return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
+                        MMC_ERASE_ARG);
+}
+
+/*
+ * Cleanup struct mmc_test_area.
+ */
+static int mmc_test_area_cleanup(struct mmc_test_card *test)
+{
+       struct mmc_test_area *t = &test->area;
+
+       kfree(t->sg);
+       mmc_test_free_mem(t->mem);
+
+       return 0;
+}
+
+/*
+ * Initialize an area for testing large transfers.  The size of the area is
+ * the preferred erase size, which is a good size for optimal transfer speed.
+ * Note that it is typically 4MiB for modern cards.  The test area is set to
+ * the middle of the card because cards may have different characteristics at
+ * the front (for FAT file system optimization).  Optionally, the area is
+ * erased (if the card supports it) which may improve write performance.
+ * Optionally, the area is filled with data for subsequent read tests.
+ */
+static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
+{
+       struct mmc_test_area *t = &test->area;
+       unsigned long min_sz = 64 * 1024;
+       int ret;
+
+       ret = mmc_test_set_blksize(test, 512);
+       if (ret)
+               return ret;
+
+       if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
+               t->max_sz = TEST_AREA_MAX_SIZE;
+       else
+               t->max_sz = (unsigned long)test->card->pref_erase << 9;
+       /*
+        * Try to allocate enough memory for the whole area.  Less is OK
+        * because the same memory can be mapped into the scatterlist more than
+        * once.
+        */
+       t->mem = mmc_test_alloc_mem(min_sz, t->max_sz);
+       if (!t->mem)
+               return -ENOMEM;
+
+       t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
+       t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
+       if (!t->sg) {
+               ret = -ENOMEM;
+               goto out_free;
+       }
+
+       t->dev_addr = mmc_test_capacity(test->card) / 2;
+       t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
+
+       if (erase) {
+               ret = mmc_test_area_erase(test);
+               if (ret)
+                       goto out_free;
+       }
+
+       if (fill) {
+               ret = mmc_test_area_fill(test);
+               if (ret)
+                       goto out_free;
+       }
+
+       return 0;
+
+out_free:
+       mmc_test_area_cleanup(test);
+       return ret;
+}
+
+/*
+ * Prepare for large transfers.  Do not erase the test area.
+ */
+static int mmc_test_area_prepare(struct mmc_test_card *test)
+{
+       return mmc_test_area_init(test, 0, 0);
+}
+
+/*
+ * Prepare for large transfers.  Do erase the test area.
+ */
+static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
+{
+       return mmc_test_area_init(test, 1, 0);
+}
+
+/*
+ * Prepare for large transfers.  Erase and fill the test area.
+ */
+static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
+{
+       return mmc_test_area_init(test, 1, 1);
+}
+
+/*
+ * Test best-case performance.  Best-case performance is expected from
+ * a single large transfer.
+ *
+ * An additional option (max_scatter) allows the measurement of the same
+ * transfer but with no contiguous pages in the scatter list.  This tests
+ * the efficiency of DMA to handle scattered pages.
+ */
+static int mmc_test_best_performance(struct mmc_test_card *test, int write,
+                                    int max_scatter)
+{
+       return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+                               write, max_scatter, 1);
+}
+
+/*
+ * Best-case read performance.
+ */
+static int mmc_test_best_read_performance(struct mmc_test_card *test)
+{
+       return mmc_test_best_performance(test, 0, 0);
+}
+
+/*
+ * Best-case write performance.
+ */
+static int mmc_test_best_write_performance(struct mmc_test_card *test)
+{
+       return mmc_test_best_performance(test, 1, 0);
+}
+
+/*
+ * Best-case read performance into scattered pages.
+ */
+static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
+{
+       return mmc_test_best_performance(test, 0, 1);
+}
+
+/*
+ * Best-case write performance from scattered pages.
+ */
+static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
+{
+       return mmc_test_best_performance(test, 1, 1);
+}
+
+/*
+ * Single read performance by transfer size.
+ */
+static int mmc_test_profile_read_perf(struct mmc_test_card *test)
+{
+       unsigned long sz;
+       unsigned int dev_addr;
+       int ret;
+
+       for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+               dev_addr = test->area.dev_addr + (sz >> 9);
+               ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+               if (ret)
+                       return ret;
+       }
+       dev_addr = test->area.dev_addr;
+       return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+}
+
+/*
+ * Single write performance by transfer size.
+ */
+static int mmc_test_profile_write_perf(struct mmc_test_card *test)
+{
+       unsigned long sz;
+       unsigned int dev_addr;
+       int ret;
+
+       ret = mmc_test_area_erase(test);
+       if (ret)
+               return ret;
+       for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+               dev_addr = test->area.dev_addr + (sz >> 9);
+               ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+               if (ret)
+                       return ret;
+       }
+       ret = mmc_test_area_erase(test);
+       if (ret)
+               return ret;
+       dev_addr = test->area.dev_addr;
+       return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+}
+
+/*
+ * Single trim performance by transfer size.
+ */
+static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
+{
+       unsigned long sz;
+       unsigned int dev_addr;
+       struct timespec ts1, ts2;
+       int ret;
+
+       if (!mmc_can_trim(test->card))
+               return RESULT_UNSUP_CARD;
+
+       if (!mmc_can_erase(test->card))
+               return RESULT_UNSUP_HOST;
+
+       for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+               dev_addr = test->area.dev_addr + (sz >> 9);
+               getnstimeofday(&ts1);
+               ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+               if (ret)
+                       return ret;
+               getnstimeofday(&ts2);
+               mmc_test_print_rate(test, sz, &ts1, &ts2);
+       }
+       dev_addr = test->area.dev_addr;
+       getnstimeofday(&ts1);
+       ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+       if (ret)
+               return ret;
+       getnstimeofday(&ts2);
+       mmc_test_print_rate(test, sz, &ts1, &ts2);
+       return 0;
+}
+
+/*
+ * Consecutive read performance by transfer size.
+ */
+static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
+{
+       unsigned long sz;
+       unsigned int dev_addr, i, cnt;
+       struct timespec ts1, ts2;
+       int ret;
+
+       for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+               cnt = test->area.max_sz / sz;
+               dev_addr = test->area.dev_addr;
+               getnstimeofday(&ts1);
+               for (i = 0; i < cnt; i++) {
+                       ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
+                       if (ret)
+                               return ret;
+                       dev_addr += (sz >> 9);
+               }
+               getnstimeofday(&ts2);
+               mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+       }
+       return 0;
+}
+
+/*
+ * Consecutive write performance by transfer size.
+ */
+static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
+{
+       unsigned long sz;
+       unsigned int dev_addr, i, cnt;
+       struct timespec ts1, ts2;
+       int ret;
+
+       for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+               ret = mmc_test_area_erase(test);
+               if (ret)
+                       return ret;
+               cnt = test->area.max_sz / sz;
+               dev_addr = test->area.dev_addr;
+               getnstimeofday(&ts1);
+               for (i = 0; i < cnt; i++) {
+                       ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
+                       if (ret)
+                               return ret;
+                       dev_addr += (sz >> 9);
+               }
+               getnstimeofday(&ts2);
+               mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+       }
+       return 0;
+}
+
+/*
+ * Consecutive trim performance by transfer size.
+ */
+static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
+{
+       unsigned long sz;
+       unsigned int dev_addr, i, cnt;
+       struct timespec ts1, ts2;
+       int ret;
+
+       if (!mmc_can_trim(test->card))
+               return RESULT_UNSUP_CARD;
+
+       if (!mmc_can_erase(test->card))
+               return RESULT_UNSUP_HOST;
+
+       for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+               ret = mmc_test_area_erase(test);
+               if (ret)
+                       return ret;
+               ret = mmc_test_area_fill(test);
+               if (ret)
+                       return ret;
+               cnt = test->area.max_sz / sz;
+               dev_addr = test->area.dev_addr;
+               getnstimeofday(&ts1);
+               for (i = 0; i < cnt; i++) {
+                       ret = mmc_erase(test->card, dev_addr, sz >> 9,
+                                       MMC_TRIM_ARG);
+                       if (ret)
+                               return ret;
+                       dev_addr += (sz >> 9);
+               }
+               getnstimeofday(&ts2);
+               mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+       }
+       return 0;
+}
+
 static const struct mmc_test_case mmc_test_cases[] = {
        {
                .name = "Basic write (no data verification)",
@@ -1040,8 +1755,100 @@ static const struct mmc_test_case mmc_test_cases[] = {
                .cleanup = mmc_test_cleanup,
        },
 
+#else
+
+       {
+               .name = "Highmem write",
+               .run = mmc_test_no_highmem,
+       },
+
+       {
+               .name = "Highmem read",
+               .run = mmc_test_no_highmem,
+       },
+
+       {
+               .name = "Multi-block highmem write",
+               .run = mmc_test_no_highmem,
+       },
+
+       {
+               .name = "Multi-block highmem read",
+               .run = mmc_test_no_highmem,
+       },
+
 #endif /* CONFIG_HIGHMEM */
 
+       {
+               .name = "Best-case read performance",
+               .prepare = mmc_test_area_prepare_fill,
+               .run = mmc_test_best_read_performance,
+               .cleanup = mmc_test_area_cleanup,
+       },
+
+       {
+               .name = "Best-case write performance",
+               .prepare = mmc_test_area_prepare_erase,
+               .run = mmc_test_best_write_performance,
+               .cleanup = mmc_test_area_cleanup,
+       },
+
+       {
+               .name = "Best-case read performance into scattered pages",
+               .prepare = mmc_test_area_prepare_fill,
+               .run = mmc_test_best_read_perf_max_scatter,
+               .cleanup = mmc_test_area_cleanup,
+       },
+
+       {
+               .name = "Best-case write performance from scattered pages",
+               .prepare = mmc_test_area_prepare_erase,
+               .run = mmc_test_best_write_perf_max_scatter,
+               .cleanup = mmc_test_area_cleanup,
+       },
+
+       {
+               .name = "Single read performance by transfer size",
+               .prepare = mmc_test_area_prepare_fill,
+               .run = mmc_test_profile_read_perf,
+               .cleanup = mmc_test_area_cleanup,
+       },
+
+       {
+               .name = "Single write performance by transfer size",
+               .prepare = mmc_test_area_prepare,
+               .run = mmc_test_profile_write_perf,
+               .cleanup = mmc_test_area_cleanup,
+       },
+
+       {
+               .name = "Single trim performance by transfer size",
+               .prepare = mmc_test_area_prepare_fill,
+               .run = mmc_test_profile_trim_perf,
+               .cleanup = mmc_test_area_cleanup,
+       },
+
+       {
+               .name = "Consecutive read performance by transfer size",
+               .prepare = mmc_test_area_prepare_fill,
+               .run = mmc_test_profile_seq_read_perf,
+               .cleanup = mmc_test_area_cleanup,
+       },
+
+       {
+               .name = "Consecutive write performance by transfer size",
+               .prepare = mmc_test_area_prepare,
+               .run = mmc_test_profile_seq_write_perf,
+               .cleanup = mmc_test_area_cleanup,
+       },
+
+       {
+               .name = "Consecutive trim performance by transfer size",
+               .prepare = mmc_test_area_prepare,
+               .run = mmc_test_profile_seq_trim_perf,
+               .cleanup = mmc_test_area_cleanup,
+       },
+
 };
 
 static DEFINE_MUTEX(mmc_test_lock);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c77eb49eda0..9c0b42bfe08 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -30,9 +30,9 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 {
        /*
-        * We only like normal block requests.
+        * We only like normal block requests and discards.
         */
-       if (req->cmd_type != REQ_TYPE_FS) {
+       if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }
 
@@ -128,8 +128,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
        mq->req = NULL;
 
        blk_queue_prep_rq(mq->queue, mmc_prep_request);
-       blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+       if (mmc_can_erase(card)) {
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
+               mq->queue->limits.max_discard_sectors = UINT_MAX;
+               if (card->erased_byte == 0)
+                       mq->queue->limits.discard_zeroes_data = 1;
+               if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
+                       mq->queue->limits.discard_granularity =
+                                                       card->erase_size << 9;
+                       mq->queue->limits.discard_alignment =
+                                                       card->erase_size << 9;
+               }
+               if (mmc_can_secure_erase_trim(card))
+                       queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
+                                               mq->queue);
+       }
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
        if (host->max_hw_segs == 1) {