author    Pierre Ossman <drzeus@drzeus.cx>  2008-06-28 13:22:40 +0200
committer Pierre Ossman <drzeus@drzeus.cx>  2008-07-15 14:14:44 +0200
commit    eea0f581c4e596e02250df230f8d385827977964 (patch)
tree      7c8c53b63c6cd5f6bcea49b53f3d436dccd6faa0 /drivers
parent    ad3868b2ec96ec14a1549c9e33f5f9a2a3c6ab15 (diff)
sdio: clean up handling of byte mode transfer size
Make sure that the maximum size for a byte mode transfer is identical in
all places. Also tweak the transfer helper so that a single byte mode
transfer is preferred over (possibly multiple) block mode requests.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
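For reference, the unified limit is simply the smallest of the host's segment
size limit, the host's block size limit, the function's maximum block size,
and the 512-byte ceiling that byte mode (IO_RW_EXTENDED) allows. Below is a
minimal standalone sketch of that calculation in plain C, with made-up example
values standing in for the real struct mmc_host and struct sdio_func fields:

#include <stdio.h>

/*
 * Hypothetical limits standing in for host->max_seg_size, host->max_blk_size
 * and func->max_blksize; real values come from the host controller driver.
 */
#define EXAMPLE_MAX_SEG_SIZE	65536u
#define EXAMPLE_MAX_BLK_SIZE	2048u
#define EXAMPLE_FUNC_BLKSIZE	256u
#define SDIO_BYTE_MODE_LIMIT	512u	/* CMD53 byte mode maximum */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Same shape as sdio_max_byte_size(): the smallest of all four limits. */
static unsigned int example_max_byte_size(void)
{
	return min_u(min_u(min_u(EXAMPLE_MAX_SEG_SIZE,
				 EXAMPLE_MAX_BLK_SIZE),
			   EXAMPLE_FUNC_BLKSIZE),
		     SDIO_BYTE_MODE_LIMIT);
}

int main(void)
{
	/*
	 * With the values above the function block size (256) is the
	 * tightest limit, so byte mode transfers are capped at 256 bytes.
	 */
	printf("max byte mode transfer: %u\n", example_max_byte_size());
	return 0;
}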
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mmc/core/sdio_io.c | 29
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 6ee7861fcea..cc42a41ff6a 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -186,9 +186,20 @@ int sdio_set_block_size(struct sdio_func *func, unsigned blksz)
func->cur_blksize = blksz;
return 0;
}
-
EXPORT_SYMBOL_GPL(sdio_set_block_size);
+/*
+ * Calculate the maximum byte mode transfer size
+ */
+static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
+{
+ return min(min(min(
+ func->card->host->max_seg_size,
+ func->card->host->max_blk_size),
+ func->max_blksize),
+ 512u); /* maximum size for byte mode */
+}
+
/**
* sdio_align_size - pads a transfer size to a more optimal value
* @func: SDIO function
@@ -222,7 +233,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
* If we can still do this with just a byte transfer, then
* we're done.
*/
- if ((sz <= func->cur_blksize) && (sz <= 512))
+ if (sz <= sdio_max_byte_size(func))
return sz;
if (func->card->cccr.multi_block) {
@@ -253,7 +264,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
*/
byte_sz = mmc_align_data_size(func->card,
sz % func->cur_blksize);
- if ((byte_sz <= func->cur_blksize) && (byte_sz <= 512)) {
+ if (byte_sz <= sdio_max_byte_size(func)) {
blk_sz = sz / func->cur_blksize;
return blk_sz * func->cur_blksize + byte_sz;
}
@@ -263,8 +274,8 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
* controller can handle the chunk size
*/
chunk_sz = mmc_align_data_size(func->card,
- min(func->cur_blksize, 512u));
- if (chunk_sz == min(func->cur_blksize, 512u)) {
+ sdio_max_byte_size(func));
+ if (chunk_sz == sdio_max_byte_size(func)) {
/*
* Fix up the size of the remainder (if any)
*/
@@ -296,7 +307,7 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
int ret;
/* Do the bulk of the transfer using block mode (if supported). */
- if (func->card->cccr.multi_block) {
+ if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
/* Blocks per command is limited by host count, host transfer
* size (we only use a single sg entry) and the maximum for
* IO_RW_EXTENDED of 511 blocks. */
@@ -328,11 +339,7 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
/* Write the remainder using byte mode. */
while (remainder > 0) {
- size = remainder;
- if (size > func->cur_blksize)
- size = func->cur_blksize;
- if (size > 512)
- size = 512; /* maximum size for byte mode */
+ size = min(remainder, sdio_max_byte_size(func));
ret = mmc_io_rw_extended(func->card, write, func->num, addr,
incr_addr, buf, 1, size);
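
The change is internal to the core; function drivers keep using the same
helpers. A sketch of a hypothetical caller follows (my_sdio_send() and
MY_FIFO_ADDR are invented for illustration, while the sdio_* calls are the
existing kernel API), showing the path that now prefers a single byte mode
transfer for small writes:

#include <linux/mmc/sdio_func.h>

/* Hypothetical FIFO register address, purely for illustration. */
#define MY_FIFO_ADDR	0x1000

static int my_sdio_send(struct sdio_func *func, void *buf, unsigned int len)
{
	int ret;

	sdio_claim_host(func);
	/*
	 * After this patch, a transfer no larger than sdio_max_byte_size()
	 * (at most 512 bytes) is issued as one byte mode CMD53, even if the
	 * card supports multi-block transfers.  Larger transfers still use
	 * block mode for the bulk and byte mode only for the remainder.
	 */
	ret = sdio_memcpy_toio(func, MY_FIFO_ADDR, buf, len);
	sdio_release_host(func);

	return ret;
}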