Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--   drivers/mmc/card/Kconfig        52
-rw-r--r--   drivers/mmc/card/Makefile       14
-rw-r--r--   drivers/mmc/card/block.c       615
-rw-r--r--   drivers/mmc/card/mmc_test.c   1211
-rw-r--r--   drivers/mmc/card/queue.c       370
-rw-r--r--   drivers/mmc/card/queue.h        31
-rw-r--r--   drivers/mmc/card/sdio_uart.c  1161
7 files changed, 3454 insertions(+), 0 deletions(-)
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
new file mode 100644
index 0000000..3f2a912
--- /dev/null
+++ b/drivers/mmc/card/Kconfig
@@ -0,0 +1,52 @@
+#
+# MMC/SD card drivers
+#
+
+comment "MMC/SD/SDIO Card Drivers"
+
+config MMC_BLOCK
+ tristate "MMC block device driver"
+ depends on BLOCK
+ default y
+ help
+ Say Y here to enable MMC block device driver support.
+ This provides a block device driver, which you can use to
+ mount a filesystem on the card. Almost everyone wishing to
+ use MMC support should say Y or M here.
+
+config MMC_BLOCK_BOUNCE
+ bool "Use bounce buffer for simple hosts"
+ depends on MMC_BLOCK
+ default y
+ help
+ SD/MMC is a high latency protocol where it is crucial to
+ send large requests in order to get high performance. Many
+ controllers, however, are restricted to contiguous memory
+ (i.e. they can't do scatter-gather), which the kernel can
+ rarely provide.
+
+ Say Y here to help these restricted hosts by bouncing
+ requests back and forth from a large buffer. You will get
+ a big performance gain at the cost of up to 64 KiB of
+ physical memory.
+
+ If unsure, say Y here.
+
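+# Note: the bounce buffer is capped at 64 KiB, so a bounced host still
+# sees requests of up to 128 512-byte sectors; the copying in and out
+# of the buffer is done by mmc_queue_bounce_pre/post in queue.c below.
+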
+config SDIO_UART
+ tristate "SDIO UART/GPS class support"
+ help
+ SDIO function driver for SDIO cards that implement the UART
+ class, as well as the GPS class, which appears to the system
+ as a UART.
+
+config MMC_TEST
+ tristate "MMC host test driver"
+ default n
+ help
+ Development driver that performs a series of reads and writes
+ to a memory card in order to expose certain well known bugs
+ in host controllers. The tests are executed by writing to the
+ "test" file in sysfs under each card. Note that whatever is
+ on your card will be overwritten by these tests.
+
+ This driver is only of interest to those developing or
+ testing a host driver. Most people should say N here.
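+
+# Example usage (the card name mmc0:0001 is only an assumption for
+# illustration; writing 0 runs all test cases in sequence):
+#   echo 0 > /sys/bus/mmc/devices/mmc0:0001/test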
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
new file mode 100644
index 0000000..0d40751
--- /dev/null
+++ b/drivers/mmc/card/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for MMC/SD card drivers
+#
+
+ifeq ($(CONFIG_MMC_DEBUG),y)
+ EXTRA_CFLAGS += -DDEBUG
+endif
+
+obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
+mmc_block-objs := block.o queue.o
+obj-$(CONFIG_MMC_TEST) += mmc_test.o
+
+obj-$(CONFIG_SDIO_UART) += sdio_uart.o
+
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
new file mode 100644
index 0000000..db7e4c4
--- /dev/null
+++ b/drivers/mmc/card/block.c
@@ -0,0 +1,615 @@
+/*
+ * Block driver for media (i.e., flash cards)
+ *
+ * Copyright 2002 Hewlett-Packard Company
+ * Copyright 2005-2008 Pierre Ossman
+ *
+ * Use consistent with the GNU GPL is permitted,
+ * provided that this copyright notice is
+ * preserved in its entirety in all copies and derived works.
+ *
+ * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
+ * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
+ * FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ * Many thanks to Alessandro Rubini and Jonathan Corbet!
+ *
+ * Author: Andrew Christian
+ * 28 May 2002
+ */
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include "queue.h"
+
+/*
+ * max 8 partitions per card
+ */
+#define MMC_SHIFT 3
+#define MMC_NUM_MINORS (256 >> MMC_SHIFT)
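+/*
+ * With 8 minors per card out of 256 total, MMC_NUM_MINORS works out
+ * to 32, i.e. at most 32 cards bound at once; the dev_use bitmap
+ * below tracks which of these slots are taken.
+ */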
+
+static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);
+
+/*
+ * There is one mmc_blk_data per slot.
+ */
+struct mmc_blk_data {
+ spinlock_t lock;
+ struct gendisk *disk;
+ struct mmc_queue queue;
+
+ unsigned int usage;
+ unsigned int read_only;
+};
+
+static DEFINE_MUTEX(open_lock);
+
+static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
+{
+ struct mmc_blk_data *md;
+
+ mutex_lock(&open_lock);
+ md = disk->private_data;
+ if (md && md->usage == 0)
+ md = NULL;
+ if (md)
+ md->usage++;
+ mutex_unlock(&open_lock);
+
+ return md;
+}
+
+static void mmc_blk_put(struct mmc_blk_data *md)
+{
+ mutex_lock(&open_lock);
+ md->usage--;
+ if (md->usage == 0) {
+ int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
+ __clear_bit(devidx, dev_use);
+
+ put_disk(md->disk);
+ kfree(md);
+ }
+ mutex_unlock(&open_lock);
+}
+
+static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
+{
+ struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (md) {
+ if (md->usage == 2)
+ check_disk_change(bdev);
+ ret = 0;
+
+ if ((mode & FMODE_WRITE) && md->read_only) {
+ mmc_blk_put(md);
+ ret = -EROFS;
+ }
+ }
+
+ return ret;
+}
+
+static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
+{
+ struct mmc_blk_data *md = disk->private_data;
+
+ mmc_blk_put(md);
+ return 0;
+}
+
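+/*
+ * Fake CHS geometry for tools that still expect one: 4 heads and 16
+ * sectors per track, with cylinders derived from the capacity.
+ */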
+static int
+mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo->heads = 4;
+ geo->sectors = 16;
+ return 0;
+}
+
+static struct block_device_operations mmc_bdops = {
+ .open = mmc_blk_open,
+ .release = mmc_blk_release,
+ .getgeo = mmc_blk_getgeo,
+ .owner = THIS_MODULE,
+};
+
+struct mmc_blk_request {
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+};
+
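+/*
+ * Issue ACMD22 (SEND_NUM_WR_BLKS) to ask an SD card how many blocks
+ * of the preceding write actually reached the medium.  The count is
+ * returned as a 4-byte big-endian data transfer, hence the ntohl()
+ * below; (u32)-1 serves as the error marker.
+ */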
+static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+{
+ int err;
+ u32 blocks;
+
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ unsigned int timeout_us;
+
+ struct scatterlist sg;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_APP_CMD;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err)
+ return (u32)-1;
+ if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
+ return (u32)-1;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ data.timeout_ns = card->csd.tacc_ns * 100;
+ data.timeout_clks = card->csd.tacc_clks * 100;
+
+ timeout_us = data.timeout_ns / 1000;
+ timeout_us += data.timeout_clks * 1000 /
+ (card->host->ios.clock / 1000);
+
+ if (timeout_us > 100000) {
+ data.timeout_ns = 100000000;
+ data.timeout_clks = 0;
+ }
+
+ data.blksz = 4;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ sg_init_one(&sg, &blocks, 4);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error || data.error)
+ return (u32)-1;
+
+ blocks = ntohl(blocks);
+
+ return blocks;
+}
+
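+/*
+ * Transfer one block layer request to the card.  Runs in the context
+ * of the mmc_queue thread (via mq->issue_fn) and loops until the
+ * whole request has been completed or an unrecoverable error occurs.
+ */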
+static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_blk_request brq;
+ int ret = 1;
+
+ mmc_claim_host(card->host);
+
+ do {
+ struct mmc_command cmd;
+ u32 readcmd, writecmd;
+
+ memset(&brq, 0, sizeof(struct mmc_blk_request));
+ brq.mrq.cmd = &brq.cmd;
+ brq.mrq.data = &brq.data;
+
+ brq.cmd.arg = req->sector;
+ if (!mmc_card_blockaddr(card))
+ brq.cmd.arg <<= 9;
+ brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq.data.blksz = 512;
+ brq.stop.opcode = MMC_STOP_TRANSMISSION;
+ brq.stop.arg = 0;
+ brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ brq.data.blocks = req->nr_sectors;
+
+ if (brq.data.blocks > 1) {
+ /* SPI multiblock writes terminate using a special
+ * token, not a STOP_TRANSMISSION request.
+ */
+ if (!mmc_host_is_spi(card->host)
+ || rq_data_dir(req) == READ)
+ brq.mrq.stop = &brq.stop;
+ readcmd = MMC_READ_MULTIPLE_BLOCK;
+ writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+ } else {
+ brq.mrq.stop = NULL;
+ readcmd = MMC_READ_SINGLE_BLOCK;
+ writecmd = MMC_WRITE_BLOCK;
+ }
+
+ if (rq_data_dir(req) == READ) {
+ brq.cmd.opcode = readcmd;
+ brq.data.flags |= MMC_DATA_READ;
+ } else {
+ brq.cmd.opcode = writecmd;
+ brq.data.flags |= MMC_DATA_WRITE;
+ }
+
+ mmc_set_data_timeout(&brq.data, card);
+
+ brq.data.sg = mq->sg;
+ brq.data.sg_len = mmc_queue_map_sg(mq);
+
+ mmc_queue_bounce_pre(mq);
+
+ mmc_wait_for_req(card->host, &brq.mrq);
+
+ mmc_queue_bounce_post(mq);
+
+ /*
+ * Check for errors here, but don't jump to cmd_err
+ * until later as we need to wait for the card to leave
+ * programming mode even when things go wrong.
+ */
+ if (brq.cmd.error) {
+ printk(KERN_ERR "%s: error %d sending read/write command\n",
+ req->rq_disk->disk_name, brq.cmd.error);
+ }
+
+ if (brq.data.error) {
+ printk(KERN_ERR "%s: error %d transferring data\n",
+ req->rq_disk->disk_name, brq.data.error);
+ }
+
+ if (brq.stop.error) {
+ printk(KERN_ERR "%s: error %d sending stop command\n",
+ req->rq_disk->disk_name, brq.stop.error);
+ }
+
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+ do {
+ int err;
+
+ cmd.opcode = MMC_SEND_STATUS;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ if (err) {
+ printk(KERN_ERR "%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ goto cmd_err;
+ }
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(cmd.resp[0]) == 7));
+
+#if 0
+ if (cmd.resp[0] & ~0x00000900)
+ printk(KERN_ERR "%s: status = %08x\n",
+ req->rq_disk->disk_name, cmd.resp[0]);
+ if (mmc_decode_status(cmd.resp))
+ goto cmd_err;
+#endif
+ }
+
+ if (brq.cmd.error || brq.data.error || brq.stop.error)
+ goto cmd_err;
+
+ /*
+ * A block was successfully transferred.
+ */
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ } while (ret);
+
+ mmc_release_host(card->host);
+
+ return 1;
+
+ cmd_err:
+ /*
+ * If this is an SD card and we're writing, we can first
+ * mark the known good sectors as ok.
+ *
+ * If the card is not SD, we can still credit the written sectors
+ * as reported by the controller (which might be less than
+ * the real number of written sectors, but never more).
+ *
+ * For reads we just fail the entire chunk as that should
+ * be safe in all cases.
+ */
+ if (rq_data_dir(req) != READ) {
+ if (mmc_card_sd(card)) {
+ u32 blocks;
+
+ blocks = mmc_sd_num_wr_blocks(card);
+ if (blocks != (u32)-1) {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0, blocks << 9);
+ spin_unlock_irq(&md->lock);
+ }
+ } else {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ }
+ }
+
+ mmc_release_host(card->host);
+
+ spin_lock_irq(&md->lock);
+ while (ret)
+ ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+ spin_unlock_irq(&md->lock);
+
+ return 0;
+}
+
+
+static inline int mmc_blk_readonly(struct mmc_card *card)
+{
+ return mmc_card_readonly(card) ||
+ !(card->csd.cmdclass & CCC_BLOCK_WRITE);
+}
+
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+ struct mmc_blk_data *md;
+ int devidx, ret;
+
+ devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
+ if (devidx >= MMC_NUM_MINORS)
+ return ERR_PTR(-ENOSPC);
+ __set_bit(devidx, dev_use);
+
+ md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+ if (!md) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Set the read-only status based on the supported commands
+ * and the write protect switch.
+ */
+ md->read_only = mmc_blk_readonly(card);
+
+ md->disk = alloc_disk(1 << MMC_SHIFT);
+ if (md->disk == NULL) {
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+
+ spin_lock_init(&md->lock);
+ md->usage = 1;
+
+ ret = mmc_init_queue(&md->queue, card, &md->lock);
+ if (ret)
+ goto err_putdisk;
+
+ md->queue.issue_fn = mmc_blk_issue_rq;
+ md->queue.data = md;
+
+ md->disk->major = MMC_BLOCK_MAJOR;
+ md->disk->first_minor = devidx << MMC_SHIFT;
+ md->disk->fops = &mmc_bdops;
+ md->disk->private_data = md;
+ md->disk->queue = md->queue.queue;
+ md->disk->driverfs_dev = &card->dev;
+
+ /*
+ * As discussed on lkml, GENHD_FL_REMOVABLE should:
+ *
+ * - be set for removable media with permanent block devices
+ * - be unset for removable block devices with permanent media
+ *
+ * Since MMC block devices clearly fall under the second
+ * case, we do not set GENHD_FL_REMOVABLE. Userspace
+ * should use the block device creation/destruction hotplug
+ * messages to tell when the card is present.
+ */
+
+ sprintf(md->disk->disk_name, "mmcblk%d", devidx);
+
+ blk_queue_hardsect_size(md->queue.queue, 512);
+
+ if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
+ /*
+ * The EXT_CSD sector count is in number of 512 byte
+ * sectors.
+ */
+ set_capacity(md->disk, card->ext_csd.sectors);
+ } else {
+ /*
+ * The CSD capacity field is in units of read_blkbits.
+ * set_capacity takes units of 512 bytes.
+ */
+ set_capacity(md->disk,
+ card->csd.capacity << (card->csd.read_blkbits - 9));
+ }
+ return md;
+
+ err_putdisk:
+ put_disk(md->disk);
+ err_kfree:
+ kfree(md);
+ out:
+ return ERR_PTR(ret);
+}
+
+static int
+mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
+{
+ struct mmc_command cmd;
+ int err;
+
+ /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
+ if (mmc_card_blockaddr(card))
+ return 0;
+
+ mmc_claim_host(card->host);
+ cmd.opcode = MMC_SET_BLOCKLEN;
+ cmd.arg = 512;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ mmc_release_host(card->host);
+
+ if (err) {
+ printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
+ md->disk->disk_name, cmd.arg, err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mmc_blk_probe(struct mmc_card *card)
+{
+ struct mmc_blk_data *md;
+ int err;
+
+ char cap_str[10];
+
+ /*
+ * Check that the card supports the command class(es) we need.
+ */
+ if (!(card->csd.cmdclass & CCC_BLOCK_READ))
+ return -ENODEV;
+
+ md = mmc_blk_alloc(card);
+ if (IS_ERR(md))
+ return PTR_ERR(md);
+
+ err = mmc_blk_set_blksize(md, card);
+ if (err)
+ goto out;
+
+ string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+ printk(KERN_INFO "%s: %s %s %s %s\n",
+ md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
+ cap_str, md->read_only ? "(ro)" : "");
+
+ mmc_set_drvdata(card, md);
+ add_disk(md->disk);
+ return 0;
+
+ out:
+ mmc_blk_put(md);
+
+ return err;
+}
+
+static void mmc_blk_remove(struct mmc_card *card)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+ if (md) {
+ /* Stop new requests from getting into the queue */
+ del_gendisk(md->disk);
+
+ /* Then flush out any already in there */
+ mmc_cleanup_queue(&md->queue);
+
+ mmc_blk_put(md);
+ }
+ mmc_set_drvdata(card, NULL);
+}
+
+#ifdef CONFIG_PM
+static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+ if (md) {
+ mmc_queue_suspend(&md->queue);
+ }
+ return 0;
+}
+
+static int mmc_blk_resume(struct mmc_card *card)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+ if (md) {
+ mmc_blk_set_blksize(md, card);
+ mmc_queue_resume(&md->queue);
+ }
+ return 0;
+}
+#else
+#define mmc_blk_suspend NULL
+#define mmc_blk_resume NULL
+#endif
+
+static struct mmc_driver mmc_driver = {
+ .drv = {
+ .name = "mmcblk",
+ },
+ .probe = mmc_blk_probe,
+ .remove = mmc_blk_remove,
+ .suspend = mmc_blk_suspend,
+ .resume = mmc_blk_resume,
+};
+
+static int __init mmc_blk_init(void)
+{
+ int res;
+
+ res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ if (res)
+ goto out;
+
+ res = mmc_register_driver(&mmc_driver);
+ if (res)
+ goto out2;
+
+ return 0;
+ out2:
+ unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ out:
+ return res;
+}
+
+static void __exit mmc_blk_exit(void)
+{
+ mmc_unregister_driver(&mmc_driver);
+ unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+}
+
+module_init(mmc_blk_init);
+module_exit(mmc_blk_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
+
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
new file mode 100644
index 0000000..b9f1e84
--- /dev/null
+++ b/drivers/mmc/card/mmc_test.c
@@ -0,0 +1,1211 @@
+/*
+ * linux/drivers/mmc/card/mmc_test.c
+ *
+ * Copyright 2007-2008 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+
+#include <linux/scatterlist.h>
+
+#define RESULT_OK 0
+#define RESULT_FAIL 1
+#define RESULT_UNSUP_HOST 2
+#define RESULT_UNSUP_CARD 3
+
+#define BUFFER_ORDER 2
+#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
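+/* BUFFER_ORDER 2 gives a four-page buffer, i.e. 16 KiB with 4 KiB pages */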
+
+struct mmc_test_card {
+ struct mmc_card *card;
+
+ u8 scratch[BUFFER_SIZE];
+ u8 *buffer;
+#ifdef CONFIG_HIGHMEM
+ struct page *highmem;
+#endif
+};
+
+/*******************************************************************/
+/* General helper functions */
+/*******************************************************************/
+
+/*
+ * Configure correct block size in card
+ */
+static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
+{
+ struct mmc_command cmd;
+ int ret;
+
+ cmd.opcode = MMC_SET_BLOCKLEN;
+ cmd.arg = size;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Fill in the mmc_request structure given a set of transfer parameters.
+ */
+static void mmc_test_prepare_mrq(struct mmc_test_card *test,
+ struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
+ unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
+{
+ BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
+
+ if (blocks > 1) {
+ mrq->cmd->opcode = write ?
+ MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
+ } else {
+ mrq->cmd->opcode = write ?
+ MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+ }
+
+ mrq->cmd->arg = dev_addr;
+ mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ if (blocks == 1)
+ mrq->stop = NULL;
+ else {
+ mrq->stop->opcode = MMC_STOP_TRANSMISSION;
+ mrq->stop->arg = 0;
+ mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
+ }
+
+ mrq->data->blksz = blksz;
+ mrq->data->blocks = blocks;
+ mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+ mrq->data->sg = sg;
+ mrq->data->sg_len = sg_len;
+
+ mmc_set_data_timeout(mrq->data, test->card);
+}
+
+/*
+ * Wait for the card to finish the busy state
+ */
+static int mmc_test_wait_busy(struct mmc_test_card *test)
+{
+ int ret, busy;
+ struct mmc_command cmd;
+
+ busy = 0;
+ do {
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SEND_STATUS;
+ cmd.arg = test->card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
+ if (ret)
+ break;
+
+ if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
+ busy = 1;
+ printk(KERN_INFO "%s: Warning: Host did not "
+ "wait for busy state to end.\n",
+ mmc_hostname(test->card->host));
+ }
+ } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
+
+ return ret;
+}
+
+/*
+ * Transfer a single sector of kernel addressable data
+ */
+static int mmc_test_buffer_transfer(struct mmc_test_card *test,
+ u8 *buffer, unsigned addr, unsigned blksz, int write)
+{
+ int ret;
+
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+
+ struct scatterlist sg;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+ memset(&stop, 0, sizeof(struct mmc_command));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ mrq.stop = &stop;
+
+ sg_init_one(&sg, buffer, blksz);
+
+ mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
+
+ mmc_wait_for_req(test->card->host, &mrq);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ ret = mmc_test_wait_busy(test);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*******************************************************************/
+/* Test preparation and cleanup */
+/*******************************************************************/
+
+/*
+ * Fill the first couple of sectors of the card with known data
+ * so that bad reads/writes can be detected
+ */
+static int __mmc_test_prepare(struct mmc_test_card *test, int write)
+{
+ int ret, i;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ if (write)
+ memset(test->buffer, 0xDF, 512);
+ else {
+ for (i = 0;i < 512;i++)
+ test->buffer[i] = i;
+ }
+
+ for (i = 0;i < BUFFER_SIZE / 512;i++) {
+ ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_prepare_write(struct mmc_test_card *test)
+{
+ return __mmc_test_prepare(test, 1);
+}
+
+static int mmc_test_prepare_read(struct mmc_test_card *test)
+{
+ return __mmc_test_prepare(test, 0);
+}
+
+static int mmc_test_cleanup(struct mmc_test_card *test)
+{
+ int ret, i;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ memset(test->buffer, 0, 512);
+
+ for (i = 0;i < BUFFER_SIZE / 512;i++) {
+ ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*******************************************************************/
+/* Test execution helpers */
+/*******************************************************************/
+
+/*
+ * Modifies the mmc_request to perform the "short transfer" tests:
+ * the command sent to the card asks for less data than the host
+ * controller has been set up for (or for none at all), so that the
+ * transfer terminates early and the reported bytes_xfered can be
+ * checked against what the card actually accepted.
+ */
+static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
+ struct mmc_request *mrq, int write)
+{
+ BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+
+ if (mrq->data->blocks > 1) {
+ mrq->cmd->opcode = write ?
+ MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+ mrq->stop = NULL;
+ } else {
+ mrq->cmd->opcode = MMC_SEND_STATUS;
+ mrq->cmd->arg = test->card->rca << 16;
+ }
+}
+
+/*
+ * Checks that a normal transfer didn't have any errors
+ */
+static int mmc_test_check_result(struct mmc_test_card *test,
+ struct mmc_request *mrq)
+{
+ int ret;
+
+ BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+
+ ret = 0;
+
+ if (!ret && mrq->cmd->error)
+ ret = mrq->cmd->error;
+ if (!ret && mrq->data->error)
+ ret = mrq->data->error;
+ if (!ret && mrq->stop && mrq->stop->error)
+ ret = mrq->stop->error;
+ if (!ret && mrq->data->bytes_xfered !=
+ mrq->data->blocks * mrq->data->blksz)
+ ret = RESULT_FAIL;
+
+ if (ret == -EINVAL)
+ ret = RESULT_UNSUP_HOST;
+
+ return ret;
+}
+
+/*
+ * Checks that a "short transfer" behaved as expected
+ */
+static int mmc_test_check_broken_result(struct mmc_test_card *test,
+ struct mmc_request *mrq)
+{
+ int ret;
+
+ BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+
+ ret = 0;
+
+ if (!ret && mrq->cmd->error)
+ ret = mrq->cmd->error;
+ if (!ret && mrq->data->error == 0)
+ ret = RESULT_FAIL;
+ if (!ret && mrq->data->error != -ETIMEDOUT)
+ ret = mrq->data->error;
+ if (!ret && mrq->stop && mrq->stop->error)
+ ret = mrq->stop->error;
+ if (mrq->data->blocks > 1) {
+ if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
+ ret = RESULT_FAIL;
+ } else {
+ if (!ret && mrq->data->bytes_xfered > 0)
+ ret = RESULT_FAIL;
+ }
+
+ if (ret == -EINVAL)
+ ret = RESULT_UNSUP_HOST;
+
+ return ret;
+}
+
+/*
+ * Tests a basic transfer with certain parameters
+ */
+static int mmc_test_simple_transfer(struct mmc_test_card *test,
+ struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
+ unsigned blocks, unsigned blksz, int write)
+{
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+ memset(&stop, 0, sizeof(struct mmc_command));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ mrq.stop = &stop;
+
+ mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
+ blocks, blksz, write);
+
+ mmc_wait_for_req(test->card->host, &mrq);
+
+ mmc_test_wait_busy(test);
+
+ return mmc_test_check_result(test, &mrq);
+}
+
+/*
+ * Tests a transfer where the card will fail completely or partly
+ */
+static int mmc_test_broken_transfer(struct mmc_test_card *test,
+ unsigned blocks, unsigned blksz, int write)
+{
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+
+ struct scatterlist sg;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+ memset(&stop, 0, sizeof(struct mmc_command));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ mrq.stop = &stop;
+
+ sg_init_one(&sg, test->buffer, blocks * blksz);
+
+ mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
+ mmc_test_prepare_broken_mrq(test, &mrq, write);
+
+ mmc_wait_for_req(test->card->host, &mrq);
+
+ mmc_test_wait_busy(test);
+
+ return mmc_test_check_broken_result(test, &mrq);
+}
+
+/*
+ * Does a complete transfer test where data is also validated
+ *
+ * Note: mmc_test_prepare_write()/mmc_test_prepare_read() must have
+ * been called before this function
+ */
+static int mmc_test_transfer(struct mmc_test_card *test,
+ struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
+ unsigned blocks, unsigned blksz, int write)
+{
+ int ret, i;
+ unsigned long flags;
+
+ if (write) {
+ for (i = 0;i < blocks * blksz;i++)
+ test->scratch[i] = i;
+ } else {
+ memset(test->scratch, 0, BUFFER_SIZE);
+ }
+ local_irq_save(flags);
+ sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+ local_irq_restore(flags);
+
+ ret = mmc_test_set_blksize(test, blksz);
+ if (ret)
+ return ret;
+
+ ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
+ blocks, blksz, write);
+ if (ret)
+ return ret;
+
+ if (write) {
+ int sectors;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
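+ /*
+ * Read back one sector more than was written (when the write
+ * ended exactly on a sector boundary) so that the 0xDF fill
+ * pattern beyond the transfer can be verified as untouched.
+ */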
+ sectors = (blocks * blksz + 511) / 512;
+ if ((sectors * 512) == (blocks * blksz))
+ sectors++;
+
+ if ((sectors * 512) > BUFFER_SIZE)
+ return -EINVAL;
+
+ memset(test->buffer, 0, sectors * 512);
+
+ for (i = 0;i < sectors;i++) {
+ ret = mmc_test_buffer_transfer(test,
+ test->buffer + i * 512,
+ dev_addr + i * 512, 512, 0);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0;i < blocks * blksz;i++) {
+ if (test->buffer[i] != (u8)i)
+ return RESULT_FAIL;
+ }
+
+ for (;i < sectors * 512;i++) {
+ if (test->buffer[i] != 0xDF)
+ return RESULT_FAIL;
+ }
+ } else {
+ local_irq_save(flags);
+ sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+ local_irq_restore(flags);
+ for (i = 0;i < blocks * blksz;i++) {
+ if (test->scratch[i] != (u8)i)
+ return RESULT_FAIL;
+ }
+ }
+
+ return 0;
+}
+
+/*******************************************************************/
+/* Tests */
+/*******************************************************************/
+
+struct mmc_test_case {
+ const char *name;
+
+ int (*prepare)(struct mmc_test_card *);
+ int (*run)(struct mmc_test_card *);
+ int (*cleanup)(struct mmc_test_card *);
+};
+
+static int mmc_test_basic_write(struct mmc_test_card *test)
+{
+ int ret;
+ struct scatterlist sg;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ sg_init_one(&sg, test->buffer, 512);
+
+ ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_basic_read(struct mmc_test_card *test)
+{
+ int ret;
+ struct scatterlist sg;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ sg_init_one(&sg, test->buffer, 512);
+
+ ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_verify_write(struct mmc_test_card *test)
+{
+ int ret;
+ struct scatterlist sg;
+
+ sg_init_one(&sg, test->buffer, 512);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_verify_read(struct mmc_test_card *test)
+{
+ int ret;
+ struct scatterlist sg;
+
+ sg_init_one(&sg, test->buffer, 512);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_multi_write(struct mmc_test_card *test)
+{
+ int ret;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ sg_init_one(&sg, test->buffer, size);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_multi_read(struct mmc_test_card *test)
+{
+ int ret;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ sg_init_one(&sg, test->buffer, size);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_pow2_write(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ if (!test->card->csd.write_partial)
+ return RESULT_UNSUP_CARD;
+
+ for (i = 1; i < 512;i <<= 1) {
+ sg_init_one(&sg, test->buffer, i);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_pow2_read(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ if (!test->card->csd.read_partial)
+ return RESULT_UNSUP_CARD;
+
+ for (i = 1; i < 512;i <<= 1) {
+ sg_init_one(&sg, test->buffer, i);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_weird_write(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ if (!test->card->csd.write_partial)
+ return RESULT_UNSUP_CARD;
+
+ for (i = 3; i < 512;i += 7) {
+ sg_init_one(&sg, test->buffer, i);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_weird_read(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ if (!test->card->csd.read_partial)
+ return RESULT_UNSUP_CARD;
+
+ for (i = 3; i < 512;i += 7) {
+ sg_init_one(&sg, test->buffer, i);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_align_write(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ for (i = 1;i < 4;i++) {
+ sg_init_one(&sg, test->buffer + i, 512);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_align_read(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ for (i = 1;i < 4;i++) {
+ sg_init_one(&sg, test->buffer + i, 512);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_align_multi_write(struct mmc_test_card *test)
+{
+ int ret, i;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ for (i = 1;i < 4;i++) {
+ sg_init_one(&sg, test->buffer + i, size);
+ ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_align_multi_read(struct mmc_test_card *test)
+{
+ int ret, i;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ for (i = 1;i < 4;i++) {
+ sg_init_one(&sg, test->buffer + i, size);
+ ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_xfersize_write(struct mmc_test_card *test)
+{
+ int ret;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ ret = mmc_test_broken_transfer(test, 1, 512, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_xfersize_read(struct mmc_test_card *test)
+{
+ int ret;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ ret = mmc_test_broken_transfer(test, 1, 512, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
+{
+ int ret;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ ret = mmc_test_broken_transfer(test, 2, 512, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
+{
+ int ret;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ ret = mmc_test_broken_transfer(test, 2, 512, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#ifdef CONFIG_HIGHMEM
+
+static int mmc_test_write_high(struct mmc_test_card *test)
+{
+ int ret;
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, 512, 0);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_read_high(struct mmc_test_card *test)
+{
+ int ret;
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, 512, 0);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_multi_write_high(struct mmc_test_card *test)
+{
+ int ret;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, size, 0);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_multi_read_high(struct mmc_test_card *test)
+{
+ int ret;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, size, 0);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+static const struct mmc_test_case mmc_test_cases[] = {
+ {
+ .name = "Basic write (no data verification)",
+ .run = mmc_test_basic_write,
+ },
+
+ {
+ .name = "Basic read (no data verification)",
+ .run = mmc_test_basic_read,
+ },
+
+ {
+ .name = "Basic write (with data verification)",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_verify_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Basic read (with data verification)",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_verify_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Multi-block write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_multi_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Multi-block read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_multi_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Power of two block writes",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_pow2_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Power of two block reads",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_pow2_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Weird sized block writes",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_weird_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Weird sized block reads",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_weird_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Badly aligned write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Badly aligned read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Badly aligned multi-block write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_multi_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Badly aligned multi-block read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_multi_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Correct xfer_size at write (start failure)",
+ .run = mmc_test_xfersize_write,
+ },
+
+ {
+ .name = "Correct xfer_size at read (start failure)",
+ .run = mmc_test_xfersize_read,
+ },
+
+ {
+ .name = "Correct xfer_size at write (midway failure)",
+ .run = mmc_test_multi_xfersize_write,
+ },
+
+ {
+ .name = "Correct xfer_size at read (midway failure)",
+ .run = mmc_test_multi_xfersize_read,
+ },
+
+#ifdef CONFIG_HIGHMEM
+
+ {
+ .name = "Highmem write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_write_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Highmem read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_read_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Multi-block highmem write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_multi_write_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Multi-block highmem read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_multi_read_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+#endif /* CONFIG_HIGHMEM */
+
+};
+
+static DEFINE_MUTEX(mmc_test_lock);
+
+static void mmc_test_run(struct mmc_test_card *test, int testcase)
+{
+ int i, ret;
+
+ printk(KERN_INFO "%s: Starting tests of card %s...\n",
+ mmc_hostname(test->card->host), mmc_card_id(test->card));
+
+ mmc_claim_host(test->card->host);
+
+ for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
+ if (testcase && ((i + 1) != testcase))
+ continue;
+
+ printk(KERN_INFO "%s: Test case %d. %s...\n",
+ mmc_hostname(test->card->host), i + 1,
+ mmc_test_cases[i].name);
+
+ if (mmc_test_cases[i].prepare) {
+ ret = mmc_test_cases[i].prepare(test);
+ if (ret) {
+ printk(KERN_INFO "%s: Result: Prepare "
+ "stage failed! (%d)\n",
+ mmc_hostname(test->card->host),
+ ret);
+ continue;
+ }
+ }
+
+ ret = mmc_test_cases[i].run(test);
+ switch (ret) {
+ case RESULT_OK:
+ printk(KERN_INFO "%s: Result: OK\n",
+ mmc_hostname(test->card->host));
+ break;
+ case RESULT_FAIL:
+ printk(KERN_INFO "%s: Result: FAILED\n",
+ mmc_hostname(test->card->host));
+ break;
+ case RESULT_UNSUP_HOST:
+ printk(KERN_INFO "%s: Result: UNSUPPORTED "
+ "(by host)\n",
+ mmc_hostname(test->card->host));
+ break;
+ case RESULT_UNSUP_CARD:
+ printk(KERN_INFO "%s: Result: UNSUPPORTED "
+ "(by card)\n",
+ mmc_hostname(test->card->host));
+ break;
+ default:
+ printk(KERN_INFO "%s: Result: ERROR (%d)\n",
+ mmc_hostname(test->card->host), ret);
+ }
+
+ if (mmc_test_cases[i].cleanup) {
+ ret = mmc_test_cases[i].cleanup(test);
+ if (ret) {
+ printk(KERN_INFO "%s: Warning: Cleanup "
+ "stage failed! (%d)\n",
+ mmc_hostname(test->card->host),
+ ret);
+ }
+ }
+ }
+
+ mmc_release_host(test->card->host);
+
+ printk(KERN_INFO "%s: Tests completed.\n",
+ mmc_hostname(test->card->host));
+}
+
+static ssize_t mmc_test_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ mutex_lock(&mmc_test_lock);
+ mutex_unlock(&mmc_test_lock);
+
+ return 0;
+}
+
+static ssize_t mmc_test_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_card *card;
+ struct mmc_test_card *test;
+ int testcase;
+
+ card = container_of(dev, struct mmc_card, dev);
+
+ testcase = simple_strtol(buf, NULL, 10);
+
+ test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
+ if (!test)
+ return -ENOMEM;
+
+ test->card = card;
+
+ test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
+#ifdef CONFIG_HIGHMEM
+ test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+ if (test->buffer && test->highmem) {
+#else
+ if (test->buffer) {
+#endif
+ mutex_lock(&mmc_test_lock);
+ mmc_test_run(test, testcase);
+ mutex_unlock(&mmc_test_lock);
+ }
+
+#ifdef CONFIG_HIGHMEM
+ if (test->highmem)
+ __free_pages(test->highmem, BUFFER_ORDER);
+#endif
+ kfree(test->buffer);
+ kfree(test);
+
+ return count;
+}
+
+static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store);
+
+static int mmc_test_probe(struct mmc_card *card)
+{
+ int ret;
+
+ if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
+ return -ENODEV;
+
+ ret = device_create_file(&card->dev, &dev_attr_test);
+ if (ret)
+ return ret;
+
+ dev_info(&card->dev, "Card claimed for testing.\n");
+
+ return 0;
+}
+
+static void mmc_test_remove(struct mmc_card *card)
+{
+ device_remove_file(&card->dev, &dev_attr_test);
+}
+
+static struct mmc_driver mmc_driver = {
+ .drv = {
+ .name = "mmc_test",
+ },
+ .probe = mmc_test_probe,
+ .remove = mmc_test_remove,
+};
+
+static int __init mmc_test_init(void)
+{
+ return mmc_register_driver(&mmc_driver);
+}
+
+static void __exit mmc_test_exit(void)
+{
+ mmc_unregister_driver(&mmc_driver);
+}
+
+module_init(mmc_test_init);
+module_exit(mmc_test_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
+MODULE_AUTHOR("Pierre Ossman");
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
new file mode 100644
index 0000000..7a72e75
--- /dev/null
+++ b/drivers/mmc/card/queue.c
@@ -0,0 +1,370 @@
+/*
+ * linux/drivers/mmc/card/queue.c
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/scatterlist.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include "queue.h"
+
+#define MMC_QUEUE_BOUNCESZ 65536
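+/* A 64 KiB bounce buffer allows requests of up to 128 512-byte sectors */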
+
+#define MMC_QUEUE_SUSPENDED (1 << 0)
+
+/*
+ * Prepare an MMC request. This just filters out odd stuff.
+ */
+static int mmc_prep_request(struct request_queue *q, struct request *req)
+{
+ /*
+ * We only like normal block requests.
+ */
+ if (!blk_fs_request(req)) {
+ blk_dump_rq_flags(req, "MMC bad request");
+ return BLKPREP_KILL;
+ }
+
+ req->cmd_flags |= REQ_DONTPREP;
+
+ return BLKPREP_OK;
+}
+
+static int mmc_queue_thread(void *d)
+{
+ struct mmc_queue *mq = d;
+ struct request_queue *q = mq->queue;
+
+ current->flags |= PF_MEMALLOC;
+
+ down(&mq->thread_sem);
+ do {
+ struct request *req = NULL;
+
+ spin_lock_irq(q->queue_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!blk_queue_plugged(q))
+ req = elv_next_request(q);
+ mq->req = req;
+ spin_unlock_irq(q->queue_lock);
+
+ if (!req) {
+ if (kthread_should_stop()) {
+ set_current_state(TASK_RUNNING);
+ break;
+ }
+ up(&mq->thread_sem);
+ schedule();
+ down(&mq->thread_sem);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+
+ mq->issue_fn(mq, req);
+ } while (1);
+ up(&mq->thread_sem);
+
+ return 0;
+}
+
+/*
+ * Generic MMC request handler. This is called for any queue on a
+ * particular host. When the host is not busy, we look for a request
+ * on any queue on this host, and attempt to issue it. This may
+ * not be the queue we were asked to process.
+ */
+static void mmc_request(struct request_queue *q)
+{
+ struct mmc_queue *mq = q->queuedata;
+ struct request *req;
+ int ret;
+
+ if (!mq) {
+ printk(KERN_ERR "MMC: killing requests for dead queue\n");
+ while ((req = elv_next_request(q)) != NULL) {
+ do {
+ ret = __blk_end_request(req, -EIO,
+ blk_rq_cur_bytes(req));
+ } while (ret);
+ }
+ return;
+ }
+
+ if (!mq->req)
+ wake_up_process(mq->thread);
+}
+
+/**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ * @lock: queue lock
+ *
+ * Initialise an MMC card request queue.
+ */
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
+{
+ struct mmc_host *host = card->host;
+ u64 limit = BLK_BOUNCE_HIGH;
+ int ret;
+
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = *mmc_dev(host)->dma_mask;
+
+ mq->card = card;
+ mq->queue = blk_init_queue(mmc_request, lock);
+ if (!mq->queue)
+ return -ENOMEM;
+
+ mq->queue->queuedata = mq;
+ mq->req = NULL;
+
+ blk_queue_prep_rq(mq->queue, mmc_prep_request);
+ blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+ if (host->max_hw_segs == 1) {
+ unsigned int bouncesz;
+
+ bouncesz = MMC_QUEUE_BOUNCESZ;
+
+ if (bouncesz > host->max_req_size)
+ bouncesz = host->max_req_size;
+ if (bouncesz > host->max_seg_size)
+ bouncesz = host->max_seg_size;
+ if (bouncesz > (host->max_blk_count * 512))
+ bouncesz = host->max_blk_count * 512;
+
+ if (bouncesz > 512) {
+ mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mq->bounce_buf) {
+ printk(KERN_WARNING "%s: unable to "
+ "allocate bounce buffer\n",
+ mmc_card_name(card));
+ }
+ }
+
+ if (mq->bounce_buf) {
+ blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
+ blk_queue_max_sectors(mq->queue, bouncesz / 512);
+ blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
+ blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+ blk_queue_max_segment_size(mq->queue, bouncesz);
+
+ mq->sg = kmalloc(sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!mq->sg) {
+ ret = -ENOMEM;
+ goto cleanup_queue;
+ }
+ sg_init_table(mq->sg, 1);
+
+ mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
+ bouncesz / 512, GFP_KERNEL);
+ if (!mq->bounce_sg) {
+ ret = -ENOMEM;
+ goto cleanup_queue;
+ }
+ sg_init_table(mq->bounce_sg, bouncesz / 512);
+ }
+ }
+#endif
+
+ if (!mq->bounce_buf) {
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
+ blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ mq->sg = kmalloc(sizeof(struct scatterlist) *
+ host->max_phys_segs, GFP_KERNEL);
+ if (!mq->sg) {
+ ret = -ENOMEM;
+ goto cleanup_queue;
+ }
+ sg_init_table(mq->sg, host->max_phys_segs);
+ }
+
+ init_MUTEX(&mq->thread_sem);
+
+ mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
+ if (IS_ERR(mq->thread)) {
+ ret = PTR_ERR(mq->thread);
+ goto free_bounce_sg;
+ }
+
+ return 0;
+ free_bounce_sg:
+ if (mq->bounce_sg)
+ kfree(mq->bounce_sg);
+ mq->bounce_sg = NULL;
+ cleanup_queue:
+ if (mq->sg)
+ kfree(mq->sg);
+ mq->sg = NULL;
+ if (mq->bounce_buf)
+ kfree(mq->bounce_buf);
+ mq->bounce_buf = NULL;
+ blk_cleanup_queue(mq->queue);
+ return ret;
+}
+
+void mmc_cleanup_queue(struct mmc_queue *mq)
+{
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+
+ /* Mark that we should start throwing out stragglers */
+ spin_lock_irqsave(q->queue_lock, flags);
+ q->queuedata = NULL;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ /* Make sure the queue isn't suspended, as that will deadlock */
+ mmc_queue_resume(mq);
+
+ /* Then terminate our worker thread */
+ kthread_stop(mq->thread);
+
+ if (mq->bounce_sg)
+ kfree(mq->bounce_sg);
+ mq->bounce_sg = NULL;
+
+ kfree(mq->sg);
+ mq->sg = NULL;
+
+ if (mq->bounce_buf)
+ kfree(mq->bounce_buf);
+ mq->bounce_buf = NULL;
+
+ blk_cleanup_queue(mq->queue);
+
+ mq->card = NULL;
+}
+EXPORT_SYMBOL(mmc_cleanup_queue);
+
+/**
+ * mmc_queue_suspend - suspend an MMC request queue
+ * @mq: MMC queue to suspend
+ *
+ * Stop the block request queue, and wait for our thread to
+ * complete any outstanding requests. This ensures that we
+ * won't suspend while a request is being processed.
+ */
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+
+ if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
+ mq->flags |= MMC_QUEUE_SUSPENDED;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ down(&mq->thread_sem);
+ }
+}
+
+/**
+ * mmc_queue_resume - resume a previously suspended MMC request queue
+ * @mq: MMC queue to resume
+ */
+void mmc_queue_resume(struct mmc_queue *mq)
+{
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+
+ if (mq->flags & MMC_QUEUE_SUSPENDED) {
+ mq->flags &= ~MMC_QUEUE_SUSPENDED;
+
+ up(&mq->thread_sem);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
+
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+{
+ unsigned int sg_len;
+ size_t buflen;
+ struct scatterlist *sg;
+ int i;
+
+ if (!mq->bounce_buf)
+ return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+
+ BUG_ON(!mq->bounce_sg);
+
+ sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+
+ mq->bounce_sg_len = sg_len;
+
+ buflen = 0;
+ for_each_sg(mq->bounce_sg, sg, sg_len, i)
+ buflen += sg->length;
+
+ sg_init_one(mq->sg, mq->bounce_buf, buflen);
+
+ return 1;
+}
+
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
+void mmc_queue_bounce_pre(struct mmc_queue *mq)
+{
+ unsigned long flags;
+
+ if (!mq->bounce_buf)
+ return;
+
+ if (rq_data_dir(mq->req) != WRITE)
+ return;
+
+ local_irq_save(flags);
+ sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
+ mq->bounce_buf, mq->sg[0].length);
+ local_irq_restore(flags);
+}
+
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
+void mmc_queue_bounce_post(struct mmc_queue *mq)
+{
+ unsigned long flags;
+
+ if (!mq->bounce_buf)
+ return;
+
+ if (rq_data_dir(mq->req) != READ)
+ return;
+
+ local_irq_save(flags);
+ sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
+ mq->bounce_buf, mq->sg[0].length);
+ local_irq_restore(flags);
+}
+
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
new file mode 100644
index 0000000..64e66e0
--- /dev/null
+++ b/drivers/mmc/card/queue.h
@@ -0,0 +1,31 @@
+#ifndef MMC_QUEUE_H
+#define MMC_QUEUE_H
+
+struct request;
+struct task_struct;
+
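+/*
+ * Per-card queue state: a kernel thread (mmcqd) pulls requests off
+ * the block layer queue and feeds them to issue_fn, while sg and the
+ * bounce_* fields implement the optional bounce buffering.
+ */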
+struct mmc_queue {
+ struct mmc_card *card;
+ struct task_struct *thread;
+ struct semaphore thread_sem;
+ unsigned int flags;
+ struct request *req;
+ int (*issue_fn)(struct mmc_queue *, struct request *);
+ void *data;
+ struct request_queue *queue;
+ struct scatterlist *sg;
+ char *bounce_buf;
+ struct scatterlist *bounce_sg;
+ unsigned int bounce_sg_len;
+};
+
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
+extern void mmc_cleanup_queue(struct mmc_queue *);
+extern void mmc_queue_suspend(struct mmc_queue *);
+extern void mmc_queue_resume(struct mmc_queue *);
+
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
+extern void mmc_queue_bounce_pre(struct mmc_queue *);
+extern void mmc_queue_bounce_post(struct mmc_queue *);
+
+#endif
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
new file mode 100644
index 0000000..78ad487
--- /dev/null
+++ b/drivers/mmc/card/sdio_uart.c
@@ -0,0 +1,1161 @@
+/*
+ * linux/drivers/mmc/card/sdio_uart.c - SDIO UART/GPS driver
+ *
+ * Based on drivers/serial/8250.c and drivers/serial/serial_core.c
+ * by Russell King.
+ *
+ * Author: Nicolas Pitre
+ * Created: June 15, 2007
+ * Copyright: MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+/*
+ * Note: Although this driver assumes a 16550A-like UART implementation,
+ * it is not possible to leverage the common 8250/16550 driver, nor the
+ * core UART infrastructure, as they assume direct access to the hardware
+ * registers, often under a spinlock. This is not possible in the SDIO
+ * context as SDIO access functions must be able to sleep.
+ *
+ * Because we need to lock the SDIO host to ensure exclusive access to
+ * the card, we simply rely on that lock to also serialize
+ * concurrent access to the same port.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/gfp.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+
+#define UART_NR 8 /* Number of UARTs this driver can handle */
+
+
+#define UART_XMIT_SIZE PAGE_SIZE
+#define WAKEUP_CHARS 256
+
+#define circ_empty(circ) ((circ)->head == (circ)->tail)
+#define circ_clear(circ) ((circ)->head = (circ)->tail = 0)
+
+#define circ_chars_pending(circ) \
+ (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
+
+#define circ_chars_free(circ) \
+ (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
+
+
+struct uart_icount {
+ __u32 cts;
+ __u32 dsr;
+ __u32 rng;
+ __u32 dcd;
+ __u32 rx;
+ __u32 tx;
+ __u32 frame;
+ __u32 overrun;
+ __u32 parity;
+ __u32 brk;
+};
+
+struct sdio_uart_port {
+ struct kref kref;
+ struct tty_struct *tty;
+ unsigned int index;
+ unsigned int opened;
+ struct mutex open_lock;
+ struct sdio_func *func;
+ struct mutex func_lock;
+ struct task_struct *in_sdio_uart_irq;
+ unsigned int regs_offset;
+ struct circ_buf xmit;
+ spinlock_t write_lock;
+ struct uart_icount icount;
+ unsigned int uartclk;
+ unsigned int mctrl;
+ unsigned int read_status_mask;
+ unsigned int ignore_status_mask;
+ unsigned char x_char;
+ unsigned char ier;
+ unsigned char lcr;
+};
+
+static struct sdio_uart_port *sdio_uart_table[UART_NR];
+static DEFINE_SPINLOCK(sdio_uart_table_lock);
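+/*
+ * Port lifetime is tied to a kref: sdio_uart_port_get()/_put() must
+ * bracket any use from the tty layer, while sdio_uart_table maps
+ * minor numbers to ports under sdio_uart_table_lock.
+ */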
+
+static int sdio_uart_add_port(struct sdio_uart_port *port)
+{
+ int index, ret = -EBUSY;
+
+ kref_init(&port->kref);
+ mutex_init(&port->open_lock);
+ mutex_init(&port->func_lock);
+ spin_lock_init(&port->write_lock);
+
+ spin_lock(&sdio_uart_table_lock);
+ for (index = 0; index < UART_NR; index++) {
+ if (!sdio_uart_table[index]) {
+ port->index = index;
+ sdio_uart_table[index] = port;
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock(&sdio_uart_table_lock);
+
+ return ret;
+}
+
+static struct sdio_uart_port *sdio_uart_port_get(unsigned index)
+{
+ struct sdio_uart_port *port;
+
+ if (index >= UART_NR)
+ return NULL;
+
+ spin_lock(&sdio_uart_table_lock);
+ port = sdio_uart_table[index];
+ if (port)
+ kref_get(&port->kref);
+ spin_unlock(&sdio_uart_table_lock);
+
+ return port;
+}
+
+static void sdio_uart_port_destroy(struct kref *kref)
+{
+ struct sdio_uart_port *port =
+ container_of(kref, struct sdio_uart_port, kref);
+ kfree(port);
+}
+
+static void sdio_uart_port_put(struct sdio_uart_port *port)
+{
+ kref_put(&port->kref, sdio_uart_port_destroy);
+}
+
+static void sdio_uart_port_remove(struct sdio_uart_port *port)
+{
+ struct sdio_func *func;
+
+ BUG_ON(sdio_uart_table[port->index] != port);
+
+ spin_lock(&sdio_uart_table_lock);
+ sdio_uart_table[port->index] = NULL;
+ spin_unlock(&sdio_uart_table_lock);
+
+ /*
+ * We're killing a port that potentially still is in use by
+ * the tty layer. Be careful to prevent any further access
+ * to the SDIO function and arrange for the tty layer to
+ * give up on that port ASAP.
+ * Beware: the lock ordering is critical.
+ */
+ mutex_lock(&port->open_lock);
+ mutex_lock(&port->func_lock);
+ func = port->func;
+ sdio_claim_host(func);
+ port->func = NULL;
+ mutex_unlock(&port->func_lock);
+ if (port->opened)
+ tty_hangup(port->tty);
+ mutex_unlock(&port->open_lock);
+ sdio_release_irq(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+
+ sdio_uart_port_put(port);
+}
+
+static int sdio_uart_claim_func(struct sdio_uart_port *port)
+{
+ mutex_lock(&port->func_lock);
+ if (unlikely(!port->func)) {
+ mutex_unlock(&port->func_lock);
+ return -ENODEV;
+ }
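+	/*
+	 * The SDIO IRQ thread calls sdio_uart_irq() with the host
+	 * already claimed; claiming it again from the same task would
+	 * deadlock, hence the in_sdio_uart_irq check.
+	 */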
+ if (likely(port->in_sdio_uart_irq != current))
+ sdio_claim_host(port->func);
+ mutex_unlock(&port->func_lock);
+ return 0;
+}
+
+static inline void sdio_uart_release_func(struct sdio_uart_port *port)
+{
+ if (likely(port->in_sdio_uart_irq != current))
+ sdio_release_host(port->func);
+}
+
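+/*
+ * 16550 register accessors: the UART registers are mapped at
+ * 'regs_offset' within the SDIO function's address space.  The NULL
+ * err_ret argument means I/O errors are deliberately ignored here;
+ * callers treat the register file as best-effort.
+ */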
+static inline unsigned int sdio_in(struct sdio_uart_port *port, int offset)
+{
+ unsigned char c;
+ c = sdio_readb(port->func, port->regs_offset + offset, NULL);
+ return c;
+}
+
+static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
+{
+ sdio_writeb(port->func, value, port->regs_offset + offset, NULL);
+}
+
+static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
+{
+ unsigned char status;
+ unsigned int ret;
+
+ status = sdio_in(port, UART_MSR);
+
+ ret = 0;
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void sdio_uart_write_mctrl(struct sdio_uart_port *port, unsigned int mctrl)
+{
+ unsigned char mcr = 0;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ sdio_out(port, UART_MCR, mcr);
+}
+
+static inline void sdio_uart_update_mctrl(struct sdio_uart_port *port,
+ unsigned int set, unsigned int clear)
+{
+ unsigned int old;
+
+ old = port->mctrl;
+ port->mctrl = (old & ~clear) | set;
+ if (old != port->mctrl)
+ sdio_uart_write_mctrl(port, port->mctrl);
+}
+
+#define sdio_uart_set_mctrl(port, x) sdio_uart_update_mctrl(port, x, 0)
+#define sdio_uart_clear_mctrl(port, x) sdio_uart_update_mctrl(port, 0, x)
+
+static void sdio_uart_change_speed(struct sdio_uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned char cval, fcr = 0;
+ unsigned int baud, quot;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+
+ for (;;) {
+ baud = tty_termios_baud_rate(termios);
+ if (baud == 0)
+ baud = 9600; /* Special case: B0 rate. */
+ if (baud <= port->uartclk)
+ break;
+		/*
+		 * The requested rate is above the input clock, which
+		 * would yield a divisor of zero.  Try again with the
+		 * old baud rate if possible, otherwise default to 9600.
+		 */
+ termios->c_cflag &= ~CBAUD;
+ if (old) {
+ termios->c_cflag |= old->c_cflag & CBAUD;
+ old = NULL;
+ } else
+ termios->c_cflag |= B9600;
+ }
+ quot = (2 * port->uartclk + baud) / (2 * baud);
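+	/*
+	 * The expression above rounds uartclk/baud to the nearest
+	 * integer rather than truncating, e.g. uartclk 115200 with
+	 * baud 31250 gives 3.69, so quot becomes 4 instead of 3.
+	 */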
+
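+	/*
+	 * RX FIFO trigger level: a 1-byte trigger at very low rates
+	 * keeps latency down, while a deeper threshold at higher
+	 * rates cuts the interrupt rate.
+	 */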
+ if (baud < 2400)
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
+ else
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
+
+ port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= UART_LSR_BI;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * CTS flow control flag and modem status interrupts
+ */
+ port->ier &= ~UART_IER_MSI;
+ if ((termios->c_cflag & CRTSCTS) || !(termios->c_cflag & CLOCAL))
+ port->ier |= UART_IER_MSI;
+
+ port->lcr = cval;
+
+ sdio_out(port, UART_IER, port->ier);
+ sdio_out(port, UART_LCR, cval | UART_LCR_DLAB);
+ sdio_out(port, UART_DLL, quot & 0xff);
+ sdio_out(port, UART_DLM, quot >> 8);
+ sdio_out(port, UART_LCR, cval);
+ sdio_out(port, UART_FCR, fcr);
+
+ sdio_uart_write_mctrl(port, port->mctrl);
+}
+
+static void sdio_uart_start_tx(struct sdio_uart_port *port)
+{
+ if (!(port->ier & UART_IER_THRI)) {
+ port->ier |= UART_IER_THRI;
+ sdio_out(port, UART_IER, port->ier);
+ }
+}
+
+static void sdio_uart_stop_tx(struct sdio_uart_port *port)
+{
+ if (port->ier & UART_IER_THRI) {
+ port->ier &= ~UART_IER_THRI;
+ sdio_out(port, UART_IER, port->ier);
+ }
+}
+
+static void sdio_uart_stop_rx(struct sdio_uart_port *port)
+{
+ port->ier &= ~UART_IER_RLSI;
+ port->read_status_mask &= ~UART_LSR_DR;
+ sdio_out(port, UART_IER, port->ier);
+}
+
+static void sdio_uart_receive_chars(struct sdio_uart_port *port, unsigned int *status)
+{
+ struct tty_struct *tty = port->tty;
+ unsigned int ch, flag;
+ int max_count = 256;
+
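+	/*
+	 * Cap the bytes consumed per call so a continuously busy
+	 * receiver cannot keep us looping here indefinitely.
+	 */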
+ do {
+ ch = sdio_in(port, UART_RX);
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE))) {
+ /*
+ * For statistics only
+ */
+ if (*status & UART_LSR_BI) {
+ *status &= ~(UART_LSR_FE | UART_LSR_PE);
+ port->icount.brk++;
+ } else if (*status & UART_LSR_PE)
+ port->icount.parity++;
+ else if (*status & UART_LSR_FE)
+ port->icount.frame++;
+ if (*status & UART_LSR_OE)
+ port->icount.overrun++;
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ *status &= port->read_status_mask;
+ if (*status & UART_LSR_BI) {
+ flag = TTY_BREAK;
+ } else if (*status & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (*status & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+
+ /*
+ * Overrun is special. Since it's reported immediately,
+ * it doesn't affect the current character.
+ */
+ if (*status & ~port->ignore_status_mask & UART_LSR_OE)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+
+ *status = sdio_in(port, UART_LSR);
+ } while ((*status & UART_LSR_DR) && (max_count-- > 0));
+ tty_flip_buffer_push(tty);
+}
+
+static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
+{
+ struct circ_buf *xmit = &port->xmit;
+ int count;
+
+ if (port->x_char) {
+ sdio_out(port, UART_TX, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+ if (circ_empty(xmit) || port->tty->stopped || port->tty->hw_stopped) {
+ sdio_uart_stop_tx(port);
+ return;
+ }
+
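+	/* Write up to one 16550A TX FIFO's worth (16 bytes) at a time. */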
+ count = 16;
+ do {
+ sdio_out(port, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (circ_chars_pending(xmit) < WAKEUP_CHARS)
+ tty_wakeup(port->tty);
+
+ if (circ_empty(xmit))
+ sdio_uart_stop_tx(port);
+}
+
+static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
+{
+ int status;
+
+ status = sdio_in(port, UART_MSR);
+
+ if ((status & UART_MSR_ANY_DELTA) == 0)
+ return;
+
+ if (status & UART_MSR_TERI)
+ port->icount.rng++;
+ if (status & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ port->icount.dcd++;
+ if (status & UART_MSR_DCTS) {
+ port->icount.cts++;
+ if (port->tty->termios->c_cflag & CRTSCTS) {
+ int cts = (status & UART_MSR_CTS);
+ if (port->tty->hw_stopped) {
+ if (cts) {
+ port->tty->hw_stopped = 0;
+ sdio_uart_start_tx(port);
+ tty_wakeup(port->tty);
+ }
+ } else {
+ if (!cts) {
+ port->tty->hw_stopped = 1;
+ sdio_uart_stop_tx(port);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static void sdio_uart_irq(struct sdio_func *func)
+{
+ struct sdio_uart_port *port = sdio_get_drvdata(func);
+ unsigned int iir, lsr;
+
+ /*
+ * In a few places sdio_uart_irq() is called directly instead of
+ * waiting for the actual interrupt to be raised and the SDIO IRQ
+ * thread scheduled in order to reduce latency. However, some
+ * interaction with the tty core may end up calling us back
+ * (serial echo, flow control, etc.) through those same places
+ * causing undesirable effects. Let's stop the recursion here.
+ */
+ if (unlikely(port->in_sdio_uart_irq == current))
+ return;
+
+ iir = sdio_in(port, UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ return;
+
+ port->in_sdio_uart_irq = current;
+ lsr = sdio_in(port, UART_LSR);
+ if (lsr & UART_LSR_DR)
+ sdio_uart_receive_chars(port, &lsr);
+ sdio_uart_check_modem_status(port);
+ if (lsr & UART_LSR_THRE)
+ sdio_uart_transmit_chars(port);
+ port->in_sdio_uart_irq = NULL;
+}
+
+static int sdio_uart_startup(struct sdio_uart_port *port)
+{
+ unsigned long page;
+ int ret;
+
+ /*
+ * Set the TTY IO error marker - we will only clear this
+ * once we have successfully opened the port.
+ */
+ set_bit(TTY_IO_ERROR, &port->tty->flags);
+
+ /* Initialise and allocate the transmit buffer. */
+ page = __get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ port->xmit.buf = (unsigned char *)page;
+ circ_clear(&port->xmit);
+
+ ret = sdio_uart_claim_func(port);
+ if (ret)
+ goto err1;
+ ret = sdio_enable_func(port->func);
+ if (ret)
+ goto err2;
+ ret = sdio_claim_irq(port->func, sdio_uart_irq);
+ if (ret)
+ goto err3;
+
+ /*
+ * Clear the FIFO buffers and disable them.
+	 * (they will be re-enabled in sdio_uart_change_speed())
+ */
+ sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
+ sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ sdio_out(port, UART_FCR, 0);
+
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) sdio_in(port, UART_LSR);
+ (void) sdio_in(port, UART_RX);
+ (void) sdio_in(port, UART_IIR);
+ (void) sdio_in(port, UART_MSR);
+
+ /*
+ * Now, initialize the UART
+ */
+ sdio_out(port, UART_LCR, UART_LCR_WLEN8);
+
+ port->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE;
+ port->mctrl = TIOCM_OUT2;
+
+ sdio_uart_change_speed(port, port->tty->termios, NULL);
+
+ if (port->tty->termios->c_cflag & CBAUD)
+ sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
+
+ if (port->tty->termios->c_cflag & CRTSCTS)
+ if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
+ port->tty->hw_stopped = 1;
+
+ clear_bit(TTY_IO_ERROR, &port->tty->flags);
+
+ /* Kick the IRQ handler once while we're still holding the host lock */
+ sdio_uart_irq(port->func);
+
+ sdio_uart_release_func(port);
+ return 0;
+
+err3:
+ sdio_disable_func(port->func);
+err2:
+ sdio_uart_release_func(port);
+err1:
+ free_page((unsigned long)port->xmit.buf);
+ return ret;
+}
+
+static void sdio_uart_shutdown(struct sdio_uart_port *port)
+{
+ int ret;
+
+ ret = sdio_uart_claim_func(port);
+ if (ret)
+ goto skip;
+
+ sdio_uart_stop_rx(port);
+
+ /* TODO: wait here for TX FIFO to drain */
+
+ /* Turn off DTR and RTS early. */
+ if (port->tty->termios->c_cflag & HUPCL)
+ sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+
+ /* Disable interrupts from this port */
+ sdio_release_irq(port->func);
+ port->ier = 0;
+ sdio_out(port, UART_IER, 0);
+
+ sdio_uart_clear_mctrl(port, TIOCM_OUT2);
+
+ /* Disable break condition and FIFOs. */
+ port->lcr &= ~UART_LCR_SBC;
+ sdio_out(port, UART_LCR, port->lcr);
+ sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT);
+ sdio_out(port, UART_FCR, 0);
+
+ sdio_disable_func(port->func);
+
+ sdio_uart_release_func(port);
+
+skip:
+ /* Free the transmit buffer page. */
+ free_page((unsigned long)port->xmit.buf);
+}
+
+static int sdio_uart_open(struct tty_struct *tty, struct file *filp)
+{
+ struct sdio_uart_port *port;
+ int ret;
+
+ port = sdio_uart_port_get(tty->index);
+ if (!port)
+ return -ENODEV;
+
+ mutex_lock(&port->open_lock);
+
+ /*
+ * Make sure not to mess up with a dead port
+ * which has not been closed yet.
+ */
+ if (tty->driver_data && tty->driver_data != port) {
+ mutex_unlock(&port->open_lock);
+ sdio_uart_port_put(port);
+ return -EBUSY;
+ }
+
+ if (!port->opened) {
+ tty->driver_data = port;
+ port->tty = tty;
+ ret = sdio_uart_startup(port);
+ if (ret) {
+ tty->driver_data = NULL;
+ port->tty = NULL;
+ mutex_unlock(&port->open_lock);
+ sdio_uart_port_put(port);
+ return ret;
+ }
+ }
+ port->opened++;
+ mutex_unlock(&port->open_lock);
+ return 0;
+}
+
+static void sdio_uart_close(struct tty_struct *tty, struct file *filp)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+
+ if (!port)
+ return;
+
+ mutex_lock(&port->open_lock);
+ BUG_ON(!port->opened);
+
+ /*
+ * This is messy. The tty layer calls us even when open()
+ * returned an error. Ignore this close request if tty->count
+ * is larger than port->count.
+	 * is larger than port->opened.
+ if (tty->count > port->opened) {
+ mutex_unlock(&port->open_lock);
+ return;
+ }
+
+ if (--port->opened == 0) {
+ tty->closing = 1;
+ sdio_uart_shutdown(port);
+ tty_ldisc_flush(tty);
+ port->tty = NULL;
+ tty->driver_data = NULL;
+ tty->closing = 0;
+ }
+ mutex_unlock(&port->open_lock);
+ sdio_uart_port_put(port);
+}
+
+static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ struct circ_buf *circ = &port->xmit;
+ int c, ret = 0;
+
+ if (!port->func)
+ return -ENODEV;
+
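+	/*
+	 * Standard circ_buf pattern: writers advance 'head' under
+	 * write_lock, while the IRQ path consumes from 'tail', so
+	 * producer and consumer never contend on the same index.
+	 */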
+ spin_lock(&port->write_lock);
+ while (1) {
+ c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
+ if (count < c)
+ c = count;
+ if (c <= 0)
+ break;
+ memcpy(circ->buf + circ->head, buf, c);
+ circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ spin_unlock(&port->write_lock);
+
+	if (!(port->ier & UART_IER_THRI)) {
+ int err = sdio_uart_claim_func(port);
+ if (!err) {
+ sdio_uart_start_tx(port);
+ sdio_uart_irq(port->func);
+ sdio_uart_release_func(port);
+ } else
+ ret = err;
+ }
+
+ return ret;
+}
+
+static int sdio_uart_write_room(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ return port ? circ_chars_free(&port->xmit) : 0;
+}
+
+static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ return port ? circ_chars_pending(&port->xmit) : 0;
+}
+
+static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+
+ port->x_char = ch;
+ if (ch && !(port->ier & UART_IER_THRI)) {
+ if (sdio_uart_claim_func(port) != 0)
+ return;
+ sdio_uart_start_tx(port);
+ sdio_uart_irq(port->func);
+ sdio_uart_release_func(port);
+ }
+}
+
+static void sdio_uart_throttle(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+
+ if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS))
+ return;
+
+ if (sdio_uart_claim_func(port) != 0)
+ return;
+
+ if (I_IXOFF(tty)) {
+ port->x_char = STOP_CHAR(tty);
+ sdio_uart_start_tx(port);
+ }
+
+ if (tty->termios->c_cflag & CRTSCTS)
+ sdio_uart_clear_mctrl(port, TIOCM_RTS);
+
+ sdio_uart_irq(port->func);
+ sdio_uart_release_func(port);
+}
+
+static void sdio_uart_unthrottle(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+
+ if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS))
+ return;
+
+ if (sdio_uart_claim_func(port) != 0)
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (port->x_char) {
+ port->x_char = 0;
+ } else {
+ port->x_char = START_CHAR(tty);
+ sdio_uart_start_tx(port);
+ }
+ }
+
+ if (tty->termios->c_cflag & CRTSCTS)
+ sdio_uart_set_mctrl(port, TIOCM_RTS);
+
+ sdio_uart_irq(port->func);
+ sdio_uart_release_func(port);
+}
+
+static void sdio_uart_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ unsigned int cflag = tty->termios->c_cflag;
+
+#define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
+
+ if ((cflag ^ old_termios->c_cflag) == 0 &&
+ RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0)
+ return;
+
+ if (sdio_uart_claim_func(port) != 0)
+ return;
+
+ sdio_uart_change_speed(port, tty->termios, old_termios);
+
+ /* Handle transition to B0 status */
+ if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
+ sdio_uart_clear_mctrl(port, TIOCM_RTS | TIOCM_DTR);
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
+ unsigned int mask = TIOCM_DTR;
+ if (!(cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags))
+ mask |= TIOCM_RTS;
+ sdio_uart_set_mctrl(port, mask);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ sdio_uart_start_tx(port);
+ }
+
+ /* Handle turning on CRTSCTS */
+ if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
+ if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) {
+ tty->hw_stopped = 1;
+ sdio_uart_stop_tx(port);
+ }
+ }
+
+ sdio_uart_release_func(port);
+}
+
+static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ int result;
+
+ result = sdio_uart_claim_func(port);
+ if (result != 0)
+ return result;
+
+ if (break_state == -1)
+ port->lcr |= UART_LCR_SBC;
+ else
+ port->lcr &= ~UART_LCR_SBC;
+ sdio_out(port, UART_LCR, port->lcr);
+
+ sdio_uart_release_func(port);
+ return 0;
+}
+
+static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ int result;
+
+ result = sdio_uart_claim_func(port);
+ if (!result) {
+ result = port->mctrl | sdio_uart_get_mctrl(port);
+ sdio_uart_release_func(port);
+ }
+
+ return result;
+}
+
+static int sdio_uart_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ int result;
+
+	result = sdio_uart_claim_func(port);
+	if (!result) {
+ sdio_uart_update_mctrl(port, set, clear);
+ sdio_uart_release_func(port);
+ }
+
+ return result;
+}
+
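+/*
+ * Mirrors the "serinfo:1.0" format used by the serial core's proc
+ * output, one summary line per port, so existing tooling that parses
+ * /proc/tty/driver entries can read this driver's stats too.
+ */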
+static int sdio_uart_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int i, len = 0;
+ off_t begin = 0;
+
+ len += sprintf(page, "serinfo:1.0 driver%s%s revision:%s\n",
+ "", "", "");
+ for (i = 0; i < UART_NR && len < PAGE_SIZE - 96; i++) {
+ struct sdio_uart_port *port = sdio_uart_port_get(i);
+ if (port) {
+			len += sprintf(page + len, "%d: uart:SDIO", i);
+			if (capable(CAP_SYS_ADMIN)) {
+ len += sprintf(page + len, " tx:%d rx:%d",
+ port->icount.tx, port->icount.rx);
+ if (port->icount.frame)
+ len += sprintf(page + len, " fe:%d",
+ port->icount.frame);
+ if (port->icount.parity)
+ len += sprintf(page + len, " pe:%d",
+ port->icount.parity);
+ if (port->icount.brk)
+ len += sprintf(page + len, " brk:%d",
+ port->icount.brk);
+ if (port->icount.overrun)
+ len += sprintf(page + len, " oe:%d",
+ port->icount.overrun);
+ if (port->icount.cts)
+ len += sprintf(page + len, " cts:%d",
+ port->icount.cts);
+ if (port->icount.dsr)
+ len += sprintf(page + len, " dsr:%d",
+ port->icount.dsr);
+ if (port->icount.rng)
+ len += sprintf(page + len, " rng:%d",
+ port->icount.rng);
+ if (port->icount.dcd)
+ len += sprintf(page + len, " dcd:%d",
+ port->icount.dcd);
+ }
+ strcat(page, "\n");
+ len++;
+ sdio_uart_port_put(port);
+ }
+
+ if (len + begin > off + count)
+ goto done;
+ if (len + begin < off) {
+ begin += len;
+ len = 0;
+ }
+ }
+ *eof = 1;
+
+done:
+ if (off >= len + begin)
+ return 0;
+ *start = page + (off - begin);
+ return (count < begin + len - off) ? count : (begin + len - off);
+}
+
+static const struct tty_operations sdio_uart_ops = {
+ .open = sdio_uart_open,
+ .close = sdio_uart_close,
+ .write = sdio_uart_write,
+ .write_room = sdio_uart_write_room,
+ .chars_in_buffer = sdio_uart_chars_in_buffer,
+ .send_xchar = sdio_uart_send_xchar,
+ .throttle = sdio_uart_throttle,
+ .unthrottle = sdio_uart_unthrottle,
+ .set_termios = sdio_uart_set_termios,
+ .break_ctl = sdio_uart_break_ctl,
+ .tiocmget = sdio_uart_tiocmget,
+ .tiocmset = sdio_uart_tiocmset,
+ .read_proc = sdio_uart_read_proc,
+};
+
+static struct tty_driver *sdio_uart_tty_driver;
+
+static int sdio_uart_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct sdio_uart_port *port;
+ int ret;
+
+ port = kzalloc(sizeof(struct sdio_uart_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ if (func->class == SDIO_CLASS_UART) {
+ printk(KERN_WARNING "%s: need info on UART class basic setup\n",
+ sdio_func_id(func));
+ kfree(port);
+ return -ENOSYS;
+ } else if (func->class == SDIO_CLASS_GPS) {
+ /*
+ * We need tuple 0x91. It contains SUBTPL_SIOREG
+ * and SUBTPL_RCVCAPS.
+ */
+ struct sdio_func_tuple *tpl;
+ for (tpl = func->tuples; tpl; tpl = tpl->next) {
+ if (tpl->code != 0x91)
+ continue;
+ if (tpl->size < 10)
+ continue;
+ if (tpl->data[1] == 0) /* SUBTPL_SIOREG */
+ break;
+ }
+ if (!tpl) {
+ printk(KERN_WARNING
+ "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
+ sdio_func_id(func));
+ kfree(port);
+ return -EINVAL;
+ }
+ printk(KERN_DEBUG "%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
+ sdio_func_id(func), tpl->data[2], tpl->data[3]);
+ port->regs_offset = (tpl->data[4] << 0) |
+ (tpl->data[5] << 8) |
+ (tpl->data[6] << 16);
+ printk(KERN_DEBUG "%s: regs offset = 0x%x\n",
+ sdio_func_id(func), port->regs_offset);
+ port->uartclk = tpl->data[7] * 115200;
+ if (port->uartclk == 0)
+ port->uartclk = 115200;
+ printk(KERN_DEBUG "%s: clk %d baudcode %u 4800-div %u\n",
+ sdio_func_id(func), port->uartclk,
+ tpl->data[7], tpl->data[8] | (tpl->data[9] << 8));
+ } else {
+ kfree(port);
+ return -EINVAL;
+ }
+
+ port->func = func;
+ sdio_set_drvdata(func, port);
+
+ ret = sdio_uart_add_port(port);
+ if (ret) {
+ kfree(port);
+ } else {
+ struct device *dev;
+		dev = tty_register_device(sdio_uart_tty_driver,
+					  port->index, &func->dev);
+ if (IS_ERR(dev)) {
+ sdio_uart_port_remove(port);
+ ret = PTR_ERR(dev);
+ }
+ }
+
+ return ret;
+}
+
+static void sdio_uart_remove(struct sdio_func *func)
+{
+ struct sdio_uart_port *port = sdio_get_drvdata(func);
+
+ tty_unregister_device(sdio_uart_tty_driver, port->index);
+ sdio_uart_port_remove(port);
+}
+
+static const struct sdio_device_id sdio_uart_ids[] = {
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_UART) },
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_GPS) },
+ { /* end: all zeroes */ },
+};
+
+MODULE_DEVICE_TABLE(sdio, sdio_uart_ids);
+
+static struct sdio_driver sdio_uart_driver = {
+ .probe = sdio_uart_probe,
+ .remove = sdio_uart_remove,
+ .name = "sdio_uart",
+ .id_table = sdio_uart_ids,
+};
+
+static int __init sdio_uart_init(void)
+{
+ int ret;
+ struct tty_driver *tty_drv;
+
+ sdio_uart_tty_driver = tty_drv = alloc_tty_driver(UART_NR);
+ if (!tty_drv)
+ return -ENOMEM;
+
+ tty_drv->owner = THIS_MODULE;
+ tty_drv->driver_name = "sdio_uart";
+ tty_drv->name = "ttySDIO";
+ tty_drv->major = 0; /* dynamically allocated */
+ tty_drv->minor_start = 0;
+ tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_drv->subtype = SERIAL_TYPE_NORMAL;
+ tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_drv->init_termios = tty_std_termios;
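+	/* 4800 8N1 matches the NMEA default used by GPS-class cards. */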
+ tty_drv->init_termios.c_cflag = B4800 | CS8 | CREAD | HUPCL | CLOCAL;
+ tty_drv->init_termios.c_ispeed = 4800;
+ tty_drv->init_termios.c_ospeed = 4800;
+ tty_set_operations(tty_drv, &sdio_uart_ops);
+
+ ret = tty_register_driver(tty_drv);
+ if (ret)
+ goto err1;
+
+ ret = sdio_register_driver(&sdio_uart_driver);
+ if (ret)
+ goto err2;
+
+ return 0;
+
+err2:
+ tty_unregister_driver(tty_drv);
+err1:
+ put_tty_driver(tty_drv);
+ return ret;
+}
+
+static void __exit sdio_uart_exit(void)
+{
+ sdio_unregister_driver(&sdio_uart_driver);
+ tty_unregister_driver(sdio_uart_tty_driver);
+ put_tty_driver(sdio_uart_tty_driver);
+}
+
+module_init(sdio_uart_init);
+module_exit(sdio_uart_exit);
+
+MODULE_AUTHOR("Nicolas Pitre");
+MODULE_LICENSE("GPL");