author     Mauro Carvalho Chehab <mchehab@infradead.org>   2008-07-24 14:05:50 -0300
committer  Mauro Carvalho Chehab <mchehab@infradead.org>   2008-07-24 14:05:50 -0300
commit     698158c799c7961824ccdb773250e16c0dd5d9e4 (patch)
tree       96f034be9e9a95e6f68b5d20d1a3df7a3cbf724a /drivers/mmc/card
parent     4c7827eec78abe6fe68fd29305806467f2a51e63 (diff)
parent     338b9bb3adac0d2c5a1e180491d9b001d624c402 (diff)
Merge ../linux-2.6
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/mmc_test.c | 225
-rw-r--r--  drivers/mmc/card/queue.c    |  97
2 files changed, 252 insertions(+), 70 deletions(-)
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index d6b9b48..a067fe4 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -21,13 +21,17 @@
 #define RESULT_UNSUP_HOST       2
 #define RESULT_UNSUP_CARD       3
 
-#define BUFFER_SIZE     (PAGE_SIZE * 4)
+#define BUFFER_ORDER    2
+#define BUFFER_SIZE     (PAGE_SIZE << BUFFER_ORDER)
 
 struct mmc_test_card {
         struct mmc_card *card;
 
         u8              scratch[BUFFER_SIZE];
         u8              *buffer;
+#ifdef CONFIG_HIGHMEM
+        struct page     *highmem;
+#endif
 };
 
 /*******************************************************************/
@@ -384,14 +388,16 @@ static int mmc_test_transfer(struct mmc_test_card *test,
         int ret, i;
         unsigned long flags;
 
+        BUG_ON(blocks * blksz > BUFFER_SIZE);
+
         if (write) {
                 for (i = 0;i < blocks * blksz;i++)
                         test->scratch[i] = i;
         } else {
-                memset(test->scratch, 0, BUFFER_SIZE);
+                memset(test->scratch, 0, blocks * blksz);
         }
         local_irq_save(flags);
-        sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+        sg_copy_from_buffer(sg, sg_len, test->scratch, blocks * blksz);
         local_irq_restore(flags);
 
         ret = mmc_test_set_blksize(test, blksz);
@@ -438,7 +444,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
                 }
         } else {
                 local_irq_save(flags);
-                sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+                sg_copy_to_buffer(sg, sg_len, test->scratch, blocks * blksz);
                 local_irq_restore(flags);
                 for (i = 0;i < blocks * blksz;i++) {
                         if (test->scratch[i] != (u8)i)
@@ -799,6 +805,157 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
         return 0;
 }
 
+static int mmc_test_bigsg_write(struct mmc_test_card *test)
+{
+        int ret;
+        unsigned int size;
+        struct scatterlist sg;
+
+        if (test->card->host->max_blk_count == 1)
+                return RESULT_UNSUP_HOST;
+
+        size = PAGE_SIZE * 2;
+        size = min(size, test->card->host->max_req_size);
+        size = min(size, test->card->host->max_seg_size);
+        size = min(size, test->card->host->max_blk_count * 512);
+
+        memset(test->buffer, 0, BUFFER_SIZE);
+
+        if (size < 1024)
+                return RESULT_UNSUP_HOST;
+
+        sg_init_table(&sg, 1);
+        sg_init_one(&sg, test->buffer, BUFFER_SIZE);
+
+        ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+        if (ret)
+                return ret;
+
+        return 0;
+}
+
+static int mmc_test_bigsg_read(struct mmc_test_card *test)
+{
+        int ret, i;
+        unsigned int size;
+        struct scatterlist sg;
+
+        if (test->card->host->max_blk_count == 1)
+                return RESULT_UNSUP_HOST;
+
+        size = PAGE_SIZE * 2;
+        size = min(size, test->card->host->max_req_size);
+        size = min(size, test->card->host->max_seg_size);
+        size = min(size, test->card->host->max_blk_count * 512);
+
+        if (size < 1024)
+                return RESULT_UNSUP_HOST;
+
+        memset(test->buffer, 0xCD, BUFFER_SIZE);
+
+        sg_init_table(&sg, 1);
+        sg_init_one(&sg, test->buffer, BUFFER_SIZE);
+
+        ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+        if (ret)
+                return ret;
+
+        /* mmc_test_transfer() doesn't check for read overflows */
+        for (i = size;i < BUFFER_SIZE;i++) {
+                if (test->buffer[i] != 0xCD)
+                        return RESULT_FAIL;
+        }
+
+        return 0;
+}
+
+#ifdef CONFIG_HIGHMEM
+
+static int mmc_test_write_high(struct mmc_test_card *test)
+{
+        int ret;
+        struct scatterlist sg;
+
+        sg_init_table(&sg, 1);
+        sg_set_page(&sg, test->highmem, 512, 0);
+
+        ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+        if (ret)
+                return ret;
+
+        return 0;
+}
+
+static int mmc_test_read_high(struct mmc_test_card *test)
+{
+        int ret;
+        struct scatterlist sg;
+
+        sg_init_table(&sg, 1);
+        sg_set_page(&sg, test->highmem, 512, 0);
+
+        ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+        if (ret)
+                return ret;
+
+        return 0;
+}
+
+static int mmc_test_multi_write_high(struct mmc_test_card *test)
+{
+        int ret;
+        unsigned int size;
+        struct scatterlist sg;
+
+        if (test->card->host->max_blk_count == 1)
+                return RESULT_UNSUP_HOST;
+
+        size = PAGE_SIZE * 2;
+        size = min(size, test->card->host->max_req_size);
+        size = min(size, test->card->host->max_seg_size);
+        size = min(size, test->card->host->max_blk_count * 512);
+
+        if (size < 1024)
+                return RESULT_UNSUP_HOST;
+
+        sg_init_table(&sg, 1);
+        sg_set_page(&sg, test->highmem, size, 0);
+
+        ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+        if (ret)
+                return ret;
+
+        return 0;
+}
+
+static int mmc_test_multi_read_high(struct mmc_test_card *test)
+{
+        int ret;
+        unsigned int size;
+        struct scatterlist sg;
+
+        if (test->card->host->max_blk_count == 1)
+                return RESULT_UNSUP_HOST;
+
+        size = PAGE_SIZE * 2;
+        size = min(size, test->card->host->max_req_size);
+        size = min(size, test->card->host->max_seg_size);
+        size = min(size, test->card->host->max_blk_count * 512);
+
+        if (size < 1024)
+                return RESULT_UNSUP_HOST;
+
+        sg_init_table(&sg, 1);
+        sg_set_page(&sg, test->highmem, size, 0);
+
+        ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+        if (ret)
+                return ret;
+
+        return 0;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
 static const struct mmc_test_case mmc_test_cases[] = {
         {
                 .name = "Basic write (no data verification)",
@@ -913,6 +1070,53 @@ static const struct mmc_test_case mmc_test_cases[] = {
                 .name = "Correct xfer_size at read (midway failure)",
                 .run = mmc_test_multi_xfersize_read,
         },
+
+        {
+                .name = "Over-sized SG list write",
+                .prepare = mmc_test_prepare_write,
+                .run = mmc_test_bigsg_write,
+                .cleanup = mmc_test_cleanup,
+        },
+
+        {
+                .name = "Over-sized SG list read",
+                .prepare = mmc_test_prepare_read,
+                .run = mmc_test_bigsg_read,
+                .cleanup = mmc_test_cleanup,
+        },
+
+#ifdef CONFIG_HIGHMEM
+
+        {
+                .name = "Highmem write",
+                .prepare = mmc_test_prepare_write,
+                .run = mmc_test_write_high,
+                .cleanup = mmc_test_cleanup,
+        },
+
+        {
+                .name = "Highmem read",
+                .prepare = mmc_test_prepare_read,
+                .run = mmc_test_read_high,
+                .cleanup = mmc_test_cleanup,
+        },
+
+        {
+                .name = "Multi-block highmem write",
+                .prepare = mmc_test_prepare_write,
+                .run = mmc_test_multi_write_high,
+                .cleanup = mmc_test_cleanup,
+        },
+
+        {
+                .name = "Multi-block highmem read",
+                .prepare = mmc_test_prepare_read,
+                .run = mmc_test_multi_read_high,
+                .cleanup = mmc_test_cleanup,
+        },
+
+#endif /* CONFIG_HIGHMEM */
+
 };
 
 static struct mutex mmc_test_lock;
@@ -1014,12 +1218,23 @@ static ssize_t mmc_test_store(struct device *dev,
         test->card = card;
 
         test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
+#ifdef CONFIG_HIGHMEM
+        test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+        if (test->buffer && test->highmem) {
+#else
         if (test->buffer) {
+#endif
                 mutex_lock(&mmc_test_lock);
                 mmc_test_run(test, testcase);
                 mutex_unlock(&mmc_test_lock);
         }
 
+#ifdef CONFIG_HIGHMEM
+        __free_pages(test->highmem, BUFFER_ORDER);
+#endif
         kfree(test->buffer);
         kfree(test);
 
@@ -1041,6 +1256,8 @@ static int mmc_test_probe(struct mmc_card *card)
         if (ret)
                 return ret;
 
+        dev_info(&card->dev, "Card claimed for testing.\n");
+
         return 0;
 }
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7731dde..3dee97e 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                         printk(KERN_WARNING "%s: unable to allocate "
                                 "bounce buffer\n", mmc_card_name(card));
                 } else {
-                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
+                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                         blk_queue_max_sectors(mq->queue, bouncesz / 512);
                         blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
                         blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
@@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
         }
 }
 
-static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
-        struct scatterlist *src, unsigned int src_len)
-{
-        unsigned int chunk;
-        char *dst_buf, *src_buf;
-        unsigned int dst_size, src_size;
-
-        dst_buf = NULL;
-        src_buf = NULL;
-        dst_size = 0;
-        src_size = 0;
-
-        while (src_len) {
-                BUG_ON(dst_len == 0);
-
-                if (dst_size == 0) {
-                        dst_buf = sg_virt(dst);
-                        dst_size = dst->length;
-                }
-
-                if (src_size == 0) {
-                        src_buf = sg_virt(src);
-                        src_size = src->length;
-                }
-
-                chunk = min(dst_size, src_size);
-
-                memcpy(dst_buf, src_buf, chunk);
-
-                dst_buf += chunk;
-                src_buf += chunk;
-                dst_size -= chunk;
-                src_size -= chunk;
-
-                if (dst_size == 0) {
-                        dst++;
-                        dst_len--;
-                }
-
-                if (src_size == 0) {
-                        src++;
-                        src_len--;
-                }
-        }
-}
-
+/*
+ * Prepare the sg list(s) to be handed of to the host driver
+ */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
 {
         unsigned int sg_len;
+        size_t buflen;
+        struct scatterlist *sg;
+        int i;
 
         if (!mq->bounce_buf)
                 return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
         mq->bounce_sg_len = sg_len;
 
-        /*
-         * Shortcut in the event we only get a single entry.
-         */
-        if (sg_len == 1) {
-                memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
-                return 1;
-        }
+        buflen = 0;
+        for_each_sg(mq->bounce_sg, sg, sg_len, i)
+                buflen += sg->length;
 
-        sg_init_one(mq->sg, mq->bounce_buf, 0);
-
-        while (sg_len) {
-                mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
-                sg_len--;
-        }
+        sg_init_one(mq->sg, mq->bounce_buf, buflen);
 
         return 1;
 }
 
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
 void mmc_queue_bounce_pre(struct mmc_queue *mq)
 {
+        unsigned long flags;
+
         if (!mq->bounce_buf)
                 return;
 
-        if (mq->bounce_sg_len == 1)
-                return;
         if (rq_data_dir(mq->req) != WRITE)
                 return;
 
-        copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
+        local_irq_save(flags);
+        sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
+                mq->bounce_buf, mq->sg[0].length);
+        local_irq_restore(flags);
 }
 
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
 void mmc_queue_bounce_post(struct mmc_queue *mq)
 {
+        unsigned long flags;
+
         if (!mq->bounce_buf)
                 return;
 
-        if (mq->bounce_sg_len == 1)
-                return;
         if (rq_data_dir(mq->req) != READ)
                 return;
 
-        copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
+        local_irq_save(flags);
+        sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
+                mq->bounce_buf, mq->sg[0].length);
+        local_irq_restore(flags);
 }
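
Note: the queue.c side of this merge drops the open-coded copy_sg() in favour of the generic
scatterlist helpers sg_copy_to_buffer()/sg_copy_from_buffer(). The sketch below is illustrative
only and is not part of the commit; the function name bounce_write_prepare and its parameters
are made up for the example. It shows the write-side bounce step in the same shape as
mmc_queue_bounce_pre() above.

#include <linux/scatterlist.h>
#include <linux/irqflags.h>

/*
 * Illustrative sketch: flatten a request's scatterlist into a contiguous
 * bounce buffer before the host driver sees it.  sg_copy_to_buffer()
 * copies from the sg list into the linear buffer; sg_copy_from_buffer()
 * goes the other way and is used on the read/completion path.
 */
static void bounce_write_prepare(struct scatterlist *sg, unsigned int sg_len,
                                 void *bounce_buf, size_t buflen)
{
        unsigned long flags;

        /*
         * Mirror the local_irq_save()/restore() pair the patch wraps
         * around these helpers.
         */
        local_irq_save(flags);
        sg_copy_to_buffer(sg, sg_len, bounce_buf, buflen);
        local_irq_restore(flags);
}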