author    Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 16:17:32 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 16:17:32 -0700
commit    f23eb2b2b28547fc70df82dd5049eb39bec5ba12 (patch)
tree      144dce462b34d8a232a06f766786ebfb0235fa87 /drivers/tty/tty_buffer.c
parent    f741a79e982cf56d7584435bad663553ffe6715f (diff)
tty: stop using "delayed_work" in the tty layer
Using delayed-work for tty flip buffers ends up causing us to wait for
the next tick to complete some actions.  That's usually not all that
noticeable, but for certain latency-critical workloads it ends up being
totally unacceptable.

As an extreme case of this, passing a token back-and-forth over a pty
will take two ticks per iteration, so even just a thousand iterations
will take 8 seconds assuming a common 250Hz configuration.

Avoiding the whole delayed work issue brings that ping-pong test-case
down to 0.009s on my machine.

In more practical terms, this latency has been a performance problem
for things like dive computer simulators (simulating the serial
interface using the ptys) and for other environments (Alan mentions a
CP/M emulator).

Reported-by: Jef Driesen <jefdriesen@telenet.be>
Acked-by: Greg KH <gregkh@suse.de>
Acked-by: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
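To make the latency argument concrete, the kind of ping-pong measurement described above can be reproduced from userspace. The sketch below is purely illustrative (it is not the reporter's or the author's actual test program): it bounces a single byte across a pseudo-terminal pair in raw mode and times a thousand round trips, which is where the two-ticks-per-iteration cost shows up.

/*
 * Illustrative pty ping-pong latency test (a sketch, not the original
 * reproducer from this commit).  Parent writes one byte to the pty
 * master, the child echoes it back from the slave side, repeat 1000x.
 * Build with something like: gcc pingpong.c -lutil -o pingpong
 */
#include <pty.h>
#include <stdio.h>
#include <termios.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	int master, slave;
	struct termios tio;
	struct timespec t0, t1;
	char c = 'x';

	if (openpty(&master, &slave, NULL, NULL, NULL) < 0) {
		perror("openpty");
		return 1;
	}

	/* Raw mode: no echo, no line buffering, byte-at-a-time reads. */
	tcgetattr(slave, &tio);
	cfmakeraw(&tio);
	tcsetattr(slave, TCSANOW, &tio);

	if (fork() == 0) {
		/* Child: echo every byte straight back. */
		for (;;) {
			if (read(slave, &c, 1) != 1)
				_exit(0);
			write(slave, &c, 1);
		}
	}

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (int i = 0; i < 1000; i++) {
		write(master, &c, 1);
		if (read(master, &c, 1) != 1)
			break;
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	printf("1000 round trips: %.3f s\n",
	       (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9);
	return 0;
}

With the old delayed-work behaviour each round trip waits for up to two timer ticks (about 8 ms at 250Hz), which is where the 8-second figure in the message comes from; with schedule_work() the flip buffer is flushed as soon as the workqueue gets to it.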
Diffstat (limited to 'drivers/tty/tty_buffer.c')
-rw-r--r--  drivers/tty/tty_buffer.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index d8210ca..b945121 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -322,7 +322,7 @@ void tty_schedule_flip(struct tty_struct *tty)
 	if (tty->buf.tail != NULL)
 		tty->buf.tail->commit = tty->buf.tail->used;
 	spin_unlock_irqrestore(&tty->buf.lock, flags);
-	schedule_delayed_work(&tty->buf.work, 1);
+	schedule_work(&tty->buf.work);
 }
 EXPORT_SYMBOL(tty_schedule_flip);
 
@@ -402,7 +402,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
 static void flush_to_ldisc(struct work_struct *work)
 {
 	struct tty_struct *tty =
-		container_of(work, struct tty_struct, buf.work.work);
+		container_of(work, struct tty_struct, buf.work);
 	unsigned long flags;
 	struct tty_ldisc *disc;
 
@@ -443,7 +443,7 @@ static void flush_to_ldisc(struct work_struct *work)
 			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
 				break;
 			if (!tty->receive_room || seen_tail) {
-				schedule_delayed_work(&tty->buf.work, 1);
+				schedule_work(&tty->buf.work);
 				break;
 			}
 			if (count > tty->receive_room)
@@ -481,7 +481,7 @@ static void flush_to_ldisc(struct work_struct *work)
  */
 void tty_flush_to_ldisc(struct tty_struct *tty)
 {
-	flush_delayed_work(&tty->buf.work);
+	flush_work(&tty->buf.work);
 }
 
 /**
@@ -506,9 +506,9 @@ void tty_flip_buffer_push(struct tty_struct *tty)
 	spin_unlock_irqrestore(&tty->buf.lock, flags);
 
 	if (tty->low_latency)
-		flush_to_ldisc(&tty->buf.work.work);
+		flush_to_ldisc(&tty->buf.work);
 	else
-		schedule_delayed_work(&tty->buf.work, 1);
+		schedule_work(&tty->buf.work);
 }
 EXPORT_SYMBOL(tty_flip_buffer_push);
 
@@ -529,6 +529,6 @@ void tty_buffer_init(struct tty_struct *tty)
 	tty->buf.tail = NULL;
 	tty->buf.free = NULL;
 	tty->buf.memory_used = 0;
-	INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
+	INIT_WORK(&tty->buf.work, flush_to_ldisc);
 }
 
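This page shows only the tty_buffer.c side of the change. For INIT_WORK() and the container_of(work, struct tty_struct, buf.work) calls above to compile, the work member of the flip-buffer head declared in the tty headers must itself change from a struct delayed_work to a plain struct work_struct. A rough sketch of that companion declaration follows; it is reconstructed from the calls in this diff rather than copied from the commit, and the surrounding field layout is an assumption.

/* include/linux/tty.h (sketch of the companion change, not part of this diff) */
struct tty_bufhead {
	struct work_struct work;	/* was: struct delayed_work work; */
	spinlock_t lock;
	struct tty_buffer *head;	/* queue head */
	struct tty_buffer *tail;	/* active buffer */
	struct tty_buffer *free;	/* free queue head */
	int memory_used;		/* buffer space used, excluding the free list */
};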