author | Pierre Ossman <drzeus@drzeus.cx> | 2007-09-27 10:48:29 +0200
committer | Pierre Ossman <drzeus@drzeus.cx> | 2007-09-27 10:48:29 +0200
commit | 6f4285d13300f1c8cd675a41ab390cea06173cd1 (patch)
tree | d0611c5e278af5c85bb157cd1b5c0db233ecaf02 /drivers/mmc
parent | 5d3ad4e8a12e538eead0a37d22b1ba6aec0f2127 (diff)
download | op-kernel-dev-6f4285d13300f1c8cd675a41ab390cea06173cd1.zip, op-kernel-dev-6f4285d13300f1c8cd675a41ab390cea06173cd1.tar.gz
sdio: adaptive interrupt polling
The interrupt polling frequency is a compromise between power usage and
interrupt latency. Unfortunately, it affects throughput rather severely
for devices which require an interrupt for every chunk of data.
By making the polling frequency adaptive, we get better throughput with
those devices without sacrificing too much power. Polling will quickly
increase when there is an actual interrupt, and slowly fall back to the
idle frequency when the interrupts stop coming.
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
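The adjustment rule described above is small enough to show in isolation. Below is a standalone userspace sketch (plain C, not kernel code) of what this patch adds to sdio_irq_thread(): halve the period whenever an interrupt was serviced, otherwise back off by one tick until the idle period is reached again. The function name adapt_period, the tick values, and the fake interrupt pattern are illustrative only.

```c
#include <stdio.h>

/*
 * Standalone sketch of the adaptive polling rule; not kernel code.
 * Names and values here are illustrative, not from the patch itself.
 */
static unsigned long adapt_period(unsigned long period,
                                  unsigned long idle_period,
                                  int serviced)
{
        if (serviced > 0) {
                /* an interrupt was handled: poll twice as often */
                period /= 2;
        } else {
                /* nothing pending: back off by one tick toward the idle rate */
                period++;
                if (period > idle_period)
                        period = idle_period;
        }
        return period;
}

int main(void)
{
        unsigned long idle_period = 10;  /* stands in for msecs_to_jiffies(10) */
        unsigned long period = idle_period;
        /* 1 = an interrupt was serviced in this poll, 0 = nothing pending */
        int pattern[] = { 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 };
        unsigned int i;

        for (i = 0; i < sizeof(pattern) / sizeof(pattern[0]); i++) {
                period = adapt_period(period, idle_period, pattern[i]);
                printf("poll %2u: serviced=%d -> period=%lu\n",
                       i, pattern[i], period);
        }
        return 0;
}
```

As in the patch, three consecutive interrupts take the period from 10 down to 1 (integer division), while a quiet stretch walks it back up one tick per poll until it settles at the idle period.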
Diffstat (limited to 'drivers/mmc')
-rw-r--r-- | drivers/mmc/core/sdio_irq.c | 28 |
1 files changed, 23 insertions, 5 deletions
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 8843a4c..f78ffee 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -27,7 +27,7 @@
 
 static int process_sdio_pending_irqs(struct mmc_card *card)
 {
-        int i, ret;
+        int i, ret, count;
         unsigned char pending;
 
         ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
@@ -37,6 +37,7 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
                 return ret;
         }
 
+        count = 0;
         for (i = 1; i <= 7; i++) {
                 if (pending & (1 << i)) {
                         struct sdio_func *func = card->sdio_func[i - 1];
@@ -46,20 +47,21 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
                                        sdio_func_id(func));
                         } else if (func->irq_handler) {
                                 func->irq_handler(func);
+                                count++;
                         } else
                                 printk(KERN_WARNING "%s: pending IRQ with no handler\n",
                                        sdio_func_id(func));
                 }
         }
 
-        return 0;
+        return count;
 }
 
 static int sdio_irq_thread(void *_host)
 {
         struct mmc_host *host = _host;
         struct sched_param param = { .sched_priority = 1 };
-        unsigned long period;
+        unsigned long period, idle_period;
         int ret;
 
         sched_setscheduler(current, SCHED_FIFO, &param);
@@ -70,8 +72,9 @@ static int sdio_irq_thread(void *_host)
          * asynchronous notification of pending SDIO card interrupts
          * hence we poll for them in that case.
          */
+        idle_period = msecs_to_jiffies(10);
         period = (host->caps & MMC_CAP_SDIO_IRQ) ?
-                MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(10);
+                MAX_SCHEDULE_TIMEOUT : idle_period;
 
         pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
                 mmc_hostname(host), period);
@@ -101,9 +104,24 @@ static int sdio_irq_thread(void *_host)
                  * errors. FIXME: determine if due to card removal and
                  * possibly exit this thread if so.
                  */
-                if (ret)
+                if (ret < 0)
                         ssleep(1);
 
+                /*
+                 * Adaptive polling frequency based on the assumption
+                 * that an interrupt will be closely followed by more.
+                 * This has a substantial benefit for network devices.
+                 */
+                if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
+                        if (ret > 0)
+                                period /= 2;
+                        else {
+                                period++;
+                                if (period > idle_period)
+                                        period = idle_period;
+                        }
+                }
+
                 set_task_state(current, TASK_INTERRUPTIBLE);
                 if (host->caps & MMC_CAP_SDIO_IRQ)
                         host->ops->enable_sdio_irq(host, 1);
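For context, the func->irq_handler pointer that process_sdio_pending_irqs() now counts is installed by an SDIO function driver through sdio_claim_irq(). The following is a hedged, minimal sketch of that driver-side registration, not part of this patch; the names my_sdio_irq and my_probe are made up, and error unwinding is omitted.

```c
#include <linux/mmc/sdio_func.h>

/*
 * Hypothetical SDIO function driver fragment; illustrative only.
 * my_sdio_irq() is what process_sdio_pending_irqs() ends up calling
 * when the card reports a pending interrupt for this function.
 */
static void my_sdio_irq(struct sdio_func *func)
{
        /* read the card's interrupt status and drain pending data here */
}

static int my_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
        int ret;

        sdio_claim_host(func);
        ret = sdio_enable_func(func);
        if (!ret)
                ret = sdio_claim_irq(func, my_sdio_irq);
        sdio_release_host(func);

        /* error unwinding (sdio_disable_func on failure) omitted for brevity */
        return ret;
}
```

Every time such a handler runs, process_sdio_pending_irqs() returns a positive count, and on hosts without MMC_CAP_SDIO_IRQ the polling thread halves its period accordingly.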