Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--   drivers/mmc/card/Kconfig    3
-rw-r--r--   drivers/mmc/card/block.c   46
-rw-r--r--   drivers/mmc/card/queue.c   23
3 files changed, 23 insertions, 49 deletions
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index dd0f398..3f2a912 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -2,7 +2,7 @@
 # MMC/SD card drivers
 #
-comment "MMC/SD Card Drivers"
+comment "MMC/SD/SDIO Card Drivers"
 config MMC_BLOCK
         tristate "MMC block device driver"
@@ -34,7 +34,6 @@ config MMC_BLOCK_BOUNCE
 config SDIO_UART
         tristate "SDIO UART/GPS class support"
-        depends on MMC
         help
           SDIO function driver for SDIO cards that implements the UART
           class, as well as the GPS class which appears like a UART.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index efacee0..24c97d3 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -58,7 +58,6 @@ struct mmc_blk_data {
         struct mmc_queue queue;
         unsigned int    usage;
-        unsigned int    block_bits;
         unsigned int    read_only;
 };
@@ -216,8 +215,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
         struct mmc_blk_data *md = mq->data;
         struct mmc_card *card = md->queue.card;
         struct mmc_blk_request brq;
-        int ret = 1, data_size, i;
-        struct scatterlist *sg;
+        int ret = 1;
         mmc_claim_host(card->host);
@@ -233,13 +231,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                 if (!mmc_card_blockaddr(card))
                         brq.cmd.arg <<= 9;
                 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-                brq.data.blksz = 1 << md->block_bits;
+                brq.data.blksz = 512;
                 brq.stop.opcode = MMC_STOP_TRANSMISSION;
                 brq.stop.arg = 0;
                 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-                brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
-                if (brq.data.blocks > card->host->max_blk_count)
-                        brq.data.blocks = card->host->max_blk_count;
+                brq.data.blocks = req->nr_sectors;
                 if (brq.data.blocks > 1) {
                         /* SPI multiblock writes terminate using a special
@@ -271,24 +267,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                 mmc_queue_bounce_pre(mq);
-                /*
-                 * Adjust the sg list so it is the same size as the
-                 * request.
-                 */
-                if (brq.data.blocks !=
-                        (req->nr_sectors >> (md->block_bits - 9))) {
-                        data_size = brq.data.blocks * brq.data.blksz;
-                        for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-                                data_size -= sg->length;
-                                if (data_size <= 0) {
-                                        sg->length += data_size;
-                                        i++;
-                                        break;
-                                }
-                        }
-                        brq.data.sg_len = i;
-                }
-
                 mmc_wait_for_req(card->host, &brq.mrq);
                 mmc_queue_bounce_post(mq);
@@ -373,16 +351,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
         if (rq_data_dir(req) != READ) {
                 if (mmc_card_sd(card)) {
                         u32 blocks;
-                        unsigned int bytes;
                         blocks = mmc_sd_num_wr_blocks(card);
                         if (blocks != (u32)-1) {
-                                if (card->csd.write_partial)
-                                        bytes = blocks << md->block_bits;
-                                else
-                                        bytes = blocks << 9;
                                 spin_lock_irq(&md->lock);
-                                ret = __blk_end_request(req, 0, bytes);
+                                ret = __blk_end_request(req, 0, blocks << 9);
                                 spin_unlock_irq(&md->lock);
                         }
                 } else {
@@ -432,13 +405,6 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
          */
         md->read_only = mmc_blk_readonly(card);
-        /*
-         * Both SD and MMC specifications state (although a bit
-         * unclearly in the MMC case) that a block size of 512
-         * bytes must always be supported by the card.
-         */
-        md->block_bits = 9;
-
         md->disk = alloc_disk(1 << MMC_SHIFT);
         if (md->disk == NULL) {
                 ret = -ENOMEM;
@@ -476,7 +442,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
         sprintf(md->disk->disk_name, "mmcblk%d", devidx);
-        blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
+        blk_queue_hardsect_size(md->queue.queue, 512);
         if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                 /*
@@ -514,7 +480,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
         mmc_claim_host(card->host);
         cmd.opcode = MMC_SET_BLOCKLEN;
-        cmd.arg = 1 << md->block_bits;
+        cmd.arg = 512;
         cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
         err = mmc_wait_for_cmd(card->host, &cmd, 5);
         mmc_release_host(card->host);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3dee97e..406989e 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -31,7 +31,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
         /*
          * We only like normal block requests.
          */
-        if (!blk_fs_request(req) && !blk_pc_request(req)) {
+        if (!blk_fs_request(req)) {
                 blk_dump_rq_flags(req, "MMC bad request");
                 return BLKPREP_KILL;
         }
@@ -131,6 +131,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
         mq->req = NULL;
         blk_queue_prep_rq(mq->queue, mmc_prep_request);
+        blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
         if (host->max_hw_segs == 1) {
@@ -142,12 +143,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                         bouncesz = host->max_req_size;
                 if (bouncesz > host->max_seg_size)
                         bouncesz = host->max_seg_size;
+                if (bouncesz > (host->max_blk_count * 512))
+                        bouncesz = host->max_blk_count * 512;
+
+                if (bouncesz > 512) {
+                        mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+                        if (!mq->bounce_buf) {
+                                printk(KERN_WARNING "%s: unable to "
+                                        "allocate bounce buffer\n",
+                                        mmc_card_name(card));
+                        }
+                }
-                mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-                if (!mq->bounce_buf) {
-                        printk(KERN_WARNING "%s: unable to allocate "
-                                "bounce buffer\n", mmc_card_name(card));
-                } else {
+                if (mq->bounce_buf) {
                         blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                         blk_queue_max_sectors(mq->queue, bouncesz / 512);
                         blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +183,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
         if (!mq->bounce_buf) {
                 blk_queue_bounce_limit(mq->queue, limit);
-                blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+                blk_queue_max_sectors(mq->queue,
+                        min(host->max_blk_count, host->max_req_size / 512));
                 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
                 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
                 blk_queue_max_segment_size(mq->queue, host->max_seg_size);