author		Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2010-07-25 14:29:22 +0300
committer	Jens Axboe <jaxboe@fusionio.com>	2010-08-07 18:53:56 +0200
commit		6467716a37673e8d47b4984eb19839bdad0a8353 (patch)
tree		8b2bfd38d53e31c47654162d4ce9220c4424a31d /mm
parent		253c34e9b10c30d3064be654b5b78fbc1a8b1896 (diff)
writeback: optimize periodic bdi thread wakeups
When the first inode for a bdi is marked dirty, we wake up the bdi thread which
should take care of the periodic background write-out. However, the write-out
will actually start only 'dirty_writeback_interval' centisecs later, so we can
delay the wake-up.
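Schematically, the hot path goes from an immediate wake-up to merely arming a
timer (a sketch of the idea, not the literal before/after code):

	/* Before: dirtying the first inode woke the flusher thread at once. */
	wake_up_process(bdi->wb.task);

	/* After: only arm a timer; the thread is woken when write-out is
	 * actually due, 'dirty_writeback_interval' centisecs from now. */
	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);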
This change was requested by Nick Piggin who pointed out that if we delay the
wake-up, we weed out two unnecessary context switches, which matters because
'__mark_inode_dirty()' is a hot-path function.
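The caller lives in fs/fs-writeback.c and is therefore outside the mm-only
diffstat below; roughly, '__mark_inode_dirty()' ends up doing something like
the following (a sketch assuming the shape of that function at the time, where
'wakeup_bdi' is set only when the first dirty inode lands on an empty bdi):

	spin_unlock(&inode_lock);

	if (wakeup_bdi)
		bdi_wakeup_thread_delayed(bdi);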
This patch introduces a new function - 'bdi_wakeup_thread_delayed()', which
sets up a timer to wake up the bdi thread and returns. So the wake-up is
delayed.
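The delay is simply the writeback interval converted from centisecs to
jiffies; with the default 'dirty_writeback_interval' of 500 centisecs the
timer fires about five seconds after the inode was dirtied. And since
'mod_timer()' on a pending timer only moves its expiry, repeated calls
collapse into a single wake-up (a minimal sketch):

	unsigned long timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
					/* 500 cs * 10 = 5000 ms ~ 5 s */

	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);	/* arms the timer  */
	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);	/* just re-arms it */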
We also delete the timer in the bdi thread just before starting write-back,
and synchronously delete it when unregistering the bdi. At the unregister
point the bdi has no users left, so no one can arm the timer again.
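The two deletions deliberately differ. In the bdi thread a plain 'del_timer()'
is enough: if the timer fires anyway, it merely wakes a thread that is already
running. At unregister time 'del_timer_sync()' additionally waits for a
handler that may be executing on another CPU, which is safe precisely because
nothing can re-arm the timer at that point:

	/* bdi thread, just before write-back: asynchronous delete suffices. */
	del_timer(&me->wakeup_timer);

	/* bdi_unregister(): also wait out a concurrently running handler. */
	del_timer_sync(&bdi->wb.wakeup_timer);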
Since we now take 'bdi->wb_lock' in the timer function, which executes in
softirq context, we have to use 'spin_lock_bh()' for 'bdi->wb_lock'. This
patch makes that change as well.
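Without the '_bh' variant the timer could self-deadlock a CPU, because the
softirq that runs the timer can interrupt a process-context holder of the same
lock. A sketch of the bad interleaving on one CPU:

	spin_lock(&bdi->wb_lock);	/* process context takes the lock */
		<timer softirq fires here>
		wakeup_timer_fn()
			spin_lock_bh(&bdi->wb_lock);	/* spins forever */

'spin_lock_bh()' disables softirqs on the local CPU for the critical section,
so the timer handler can never preempt a lock holder there.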
This patch also moves the 'bdi_wb_init()' function down in the file to avoid
a forward declaration of 'bdi_wakeup_thread_delayed()'.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'mm')
-rw-r--r--	mm/backing-dev.c	73
1 file changed, 57 insertions(+), 16 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index a9a08d8..cfff722 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -248,17 +248,6 @@ static int __init default_bdi_init(void)
 }
 subsys_initcall(default_bdi_init);
 
-static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
-{
-	memset(wb, 0, sizeof(*wb));
-
-	wb->bdi = bdi;
-	wb->last_old_flush = jiffies;
-	INIT_LIST_HEAD(&wb->b_dirty);
-	INIT_LIST_HEAD(&wb->b_io);
-	INIT_LIST_HEAD(&wb->b_more_io);
-}
-
 int bdi_has_dirty_io(struct backing_dev_info *bdi)
 {
 	return wb_has_dirty_io(&bdi->wb);
@@ -316,6 +305,43 @@ static void sync_supers_timer_fn(unsigned long unused)
 	bdi_arm_supers_timer();
 }
 
+static void wakeup_timer_fn(unsigned long data)
+{
+	struct backing_dev_info *bdi = (struct backing_dev_info *)data;
+
+	spin_lock_bh(&bdi->wb_lock);
+	if (bdi->wb.task) {
+		wake_up_process(bdi->wb.task);
+	} else {
+		/*
+		 * When bdi tasks are inactive for long time, they are killed.
+		 * In this case we have to wake-up the forker thread which
+		 * should create and run the bdi thread.
+		 */
+		wake_up_process(default_backing_dev_info.wb.task);
+	}
+	spin_unlock_bh(&bdi->wb_lock);
+}
+
+/*
+ * This function is used when the first inode for this bdi is marked dirty. It
+ * wakes-up the corresponding bdi thread which should then take care of the
+ * periodic background write-out of dirty inodes. Since the write-out would
+ * starts only 'dirty_writeback_interval' centisecs from now anyway, we just
+ * set up a timer which wakes the bdi thread up later.
+ *
+ * Note, we wouldn't bother setting up the timer, but this function is on the
+ * fast-path (used by '__mark_inode_dirty()'), so we save few context switches
+ * by delaying the wake-up.
+ */
+void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
+{
+	unsigned long timeout;
+
+	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
+	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
+}
+
 /*
  * Calculate the longest interval (jiffies) bdi threads are allowed to be
  * inactive.
@@ -353,8 +379,10 @@ static int bdi_forker_thread(void *ptr)
 		 * Temporary measure, we want to make sure we don't see
 		 * dirty data on the default backing_dev_info
 		 */
-		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
+		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
+			del_timer(&me->wakeup_timer);
 			wb_do_writeback(me, 0);
+		}
 
 		spin_lock_bh(&bdi_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -386,7 +414,7 @@ static int bdi_forker_thread(void *ptr)
 				break;
 			}
 
-			spin_lock(&bdi->wb_lock);
+			spin_lock_bh(&bdi->wb_lock);
 			/*
 			 * If there is no work to do and the bdi thread was
 			 * inactive long enough - kill it. The wb_lock is taken
@@ -403,7 +431,7 @@ static int bdi_forker_thread(void *ptr)
 				action = KILL_THREAD;
 				break;
 			}
-			spin_unlock(&bdi->wb_lock);
+			spin_unlock_bh(&bdi->wb_lock);
 		}
 		spin_unlock_bh(&bdi_lock);
 
@@ -427,9 +455,9 @@ static int bdi_forker_thread(void *ptr)
 				 * The spinlock makes sure we do not lose
 				 * wake-ups when racing with 'bdi_queue_work()'.
 				 */
-				spin_lock(&bdi->wb_lock);
+				spin_lock_bh(&bdi->wb_lock);
 				bdi->wb.task = task;
-				spin_unlock(&bdi->wb_lock);
+				spin_unlock_bh(&bdi->wb_lock);
 			}
 			break;
 
@@ -586,6 +614,7 @@ void bdi_unregister(struct backing_dev_info *bdi)
 	if (bdi->dev) {
 		trace_writeback_bdi_unregister(bdi);
 		bdi_prune_sb(bdi);
+		del_timer_sync(&bdi->wb.wakeup_timer);
 
 		if (!bdi_cap_flush_forker(bdi))
 			bdi_wb_shutdown(bdi);
@@ -596,6 +625,18 @@ void bdi_unregister(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_unregister);
 
+static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
+{
+	memset(wb, 0, sizeof(*wb));
+
+	wb->bdi = bdi;
+	wb->last_old_flush = jiffies;
+	INIT_LIST_HEAD(&wb->b_dirty);
+	INIT_LIST_HEAD(&wb->b_io);
+	INIT_LIST_HEAD(&wb->b_more_io);
+	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
+}
+
 int bdi_init(struct backing_dev_info *bdi)
 {
 	int i, err;