author     Pierre Ossman <drzeus@drzeus.cx>    2008-07-22 14:35:42 +0200
committer  Pierre Ossman <drzeus@drzeus.cx>    2008-07-23 14:42:09 +0200
commit     2ff1fa679115e3c8c78ad74ad8fd2d7fd87ae4e7 (patch)
tree       441ea088fa64fa0d22e23b048731ad16a16bc3a6 /drivers/mmc/card
parent     719a61b452ff74cf81a96e4212748d9d63bcc924 (diff)
mmc_block: bounce buffer highmem support
Support highmem pages in the bounce buffer code by using the
sg_copy_from/to_buffer() functions.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
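For context, a minimal sketch (not part of the patch) of the pattern the commit switches to: instead of walking the scatterlist with sg_virt() and memcpy(), which only works for pages with a permanent kernel mapping, the data is moved with sg_copy_to_buffer()/sg_copy_from_buffer(), which map each sg page internally and therefore cope with highmem. The helper names and parameters below are illustrative, not taken from the driver.

#include <linux/scatterlist.h>
#include <linux/irqflags.h>

/*
 * Illustrative (hypothetical) helpers: move data between a scatterlist
 * and a contiguous bounce buffer.  sg_copy_to_buffer() and
 * sg_copy_from_buffer() map each sg entry internally, so highmem pages
 * are handled without the caller ever needing sg_virt().
 */
static void bounce_sg_to_linear(struct scatterlist *sg, unsigned int nents,
				void *bounce_buf, size_t buflen)
{
	unsigned long flags;

	/* The atomic mappings used inside the copy want interrupts off,
	 * as the patch does with local_irq_save()/local_irq_restore(). */
	local_irq_save(flags);
	sg_copy_to_buffer(sg, nents, bounce_buf, buflen);
	local_irq_restore(flags);
}

static void bounce_linear_to_sg(struct scatterlist *sg, unsigned int nents,
				void *bounce_buf, size_t buflen)
{
	unsigned long flags;

	local_irq_save(flags);
	sg_copy_from_buffer(sg, nents, bounce_buf, buflen);
	local_irq_restore(flags);
}

This is also why the first hunk below can relax the block-layer bounce limit from BLK_BOUNCE_HIGH to BLK_BOUNCE_ANY: the MMC bounce code no longer needs the block layer to keep request pages out of highmem.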
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/queue.c | 97
1 file changed, 31 insertions(+), 66 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7731dde..3dee97e 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
printk(KERN_WARNING "%s: unable to allocate "
"bounce buffer\n", mmc_card_name(card));
} else {
- blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
+ blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_sectors(mq->queue, bouncesz / 512);
blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
@@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
-static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
- struct scatterlist *src, unsigned int src_len)
-{
- unsigned int chunk;
- char *dst_buf, *src_buf;
- unsigned int dst_size, src_size;
-
- dst_buf = NULL;
- src_buf = NULL;
- dst_size = 0;
- src_size = 0;
-
- while (src_len) {
- BUG_ON(dst_len == 0);
-
- if (dst_size == 0) {
- dst_buf = sg_virt(dst);
- dst_size = dst->length;
- }
-
- if (src_size == 0) {
- src_buf = sg_virt(src);
- src_size = src->length;
- }
-
- chunk = min(dst_size, src_size);
-
- memcpy(dst_buf, src_buf, chunk);
-
- dst_buf += chunk;
- src_buf += chunk;
- dst_size -= chunk;
- src_size -= chunk;
-
- if (dst_size == 0) {
- dst++;
- dst_len--;
- }
-
- if (src_size == 0) {
- src++;
- src_len--;
- }
- }
-}
-
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
unsigned int sg_len;
+ size_t buflen;
+ struct scatterlist *sg;
+ int i;
if (!mq->bounce_buf)
return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
mq->bounce_sg_len = sg_len;
- /*
- * Shortcut in the event we only get a single entry.
- */
- if (sg_len == 1) {
- memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
- return 1;
- }
+ buflen = 0;
+ for_each_sg(mq->bounce_sg, sg, sg_len, i)
+ buflen += sg->length;
- sg_init_one(mq->sg, mq->bounce_buf, 0);
-
- while (sg_len) {
- mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
- sg_len--;
- }
+ sg_init_one(mq->sg, mq->bounce_buf, buflen);
return 1;
}
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
+ unsigned long flags;
+
if (!mq->bounce_buf)
return;
- if (mq->bounce_sg_len == 1)
- return;
if (rq_data_dir(mq->req) != WRITE)
return;
- copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
+ local_irq_save(flags);
+ sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
+ mq->bounce_buf, mq->sg[0].length);
+ local_irq_restore(flags);
}
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
+ unsigned long flags;
+
if (!mq->bounce_buf)
return;
- if (mq->bounce_sg_len == 1)
- return;
if (rq_data_dir(mq->req) != READ)
return;
- copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
+ local_irq_save(flags);
+ sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
+ mq->bounce_buf, mq->sg[0].length);
+ local_irq_restore(flags);
}
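Condensed, the resulting bounce path looks roughly like the sketch below (assumed structure for illustration; the wrapper function name is hypothetical): the request is mapped into the bounce scatterlist, the entry lengths are summed, and the host controller is handed a single segment covering the bounce buffer, with the copies in mmc_queue_bounce_pre()/mmc_queue_bounce_post() performed only for the direction that actually needs them.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical wrapper summarizing mmc_queue_map_sg() after this patch:
 * build the bounce scatterlist for the request, add up its length, and
 * describe the whole transfer as one sg entry over the bounce buffer.
 */
static unsigned int example_map_bounced_request(struct request_queue *q,
						struct request *req,
						struct scatterlist *bounce_sg,
						struct scatterlist *sg,
						void *bounce_buf)
{
	struct scatterlist *iter;
	unsigned int sg_len;
	size_t buflen = 0;
	int i;

	/* Map the request into the (possibly highmem) bounce sg list. */
	sg_len = blk_rq_map_sg(q, req, bounce_sg);

	/* The transfer size is the sum of all bounce sg entry lengths. */
	for_each_sg(bounce_sg, iter, sg_len, i)
		buflen += iter->length;

	/* The host driver sees one contiguous segment: the bounce buffer. */
	sg_init_one(sg, bounce_buf, buflen);

	return sg_len;
}

mq->sg[0].length then carries the total transfer size, which is exactly the buflen passed to sg_copy_to_buffer()/sg_copy_from_buffer() in the bounce_pre/bounce_post hunks above.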