Diffstat (limited to 'fs/buffer.c')
-rw-r--r-- | fs/buffer.c | 51
1 file changed, 14 insertions, 37 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 2219a76..2e6b1a3 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-	struct block_device *bd;
-	struct buffer_head *bh
-		= container_of(word, struct buffer_head, b_state);
-
-	smp_mb();
-	bd = bh->b_bdev;
-	if (bd)
-		blk_run_address_space(bd->bd_inode->i_mapping);
 	io_schedule();
 	return 0;
 }
 
 void __lock_buffer(struct buffer_head *bh)
 {
-	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
@@ -749,10 +741,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping, *prev_mapping = NULL;
+	struct address_space *mapping;
 	int err = 0, err2;
+	struct blk_plug plug;
 
 	INIT_LIST_HEAD(&tmp);
+	blk_start_plug(&plug);
 
 	spin_lock(lock);
 	while (!list_empty(list)) {
@@ -775,7 +769,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * still in flight on potentially older
 				 * contents.
 				 */
-				write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+				write_dirty_buffer(bh, WRITE_SYNC);
 
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -783,16 +777,16 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * wait_on_buffer() will do that for us
 				 * through sync_buffer().
 				 */
-				if (prev_mapping && prev_mapping != mapping)
-					blk_run_address_space(prev_mapping);
-				prev_mapping = mapping;
-
 				brelse(bh);
 				spin_lock(lock);
 			}
 		}
 	}
 
+	spin_unlock(lock);
+	blk_finish_plug(&plug);
+	spin_lock(lock);
+
 	while (!list_empty(&tmp)) {
 		bh = BH_ENTRY(tmp.prev);
 		get_bh(bh);
@@ -1614,14 +1608,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the out before the higher-level caller actually
- * waits for the writes to be completed.  The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
 			get_block_t *get_block, struct writeback_control *wbc,
@@ -1634,7 +1622,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
-			WRITE_SYNC_PLUG : WRITE);
+			WRITE_SYNC : WRITE);
 
 	BUG_ON(!PageLocked(page));
@@ -3138,17 +3126,6 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-void block_sync_page(struct page *page)
-{
-	struct address_space *mapping;
-
-	smp_mb();
-	mapping = page_mapping(page);
-	if (mapping)
-		blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left.  But distributions are
  * still running obsolete flush daemons, so we terminate them here.
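
The core of this change is the move from implicit per-queue plugging (blk_run_address_space()/blk_run_backing_dev() and the now-removed block_sync_page()) to explicit on-stack plugging. Below is a minimal sketch of the new pattern, assuming the 2.6.39-era API used in the diff above (blk_start_plug(), blk_finish_plug(), write_dirty_buffer(), WRITE_SYNC); the helper submit_buffers_plugged() and its buffer array are hypothetical and not part of this patch.

#include <linux/blkdev.h>
#include <linux/buffer_head.h>

/*
 * Sketch: batch buffer writes behind an on-stack plug, then flush them
 * to the driver in one go, instead of kicking the device queue after
 * each submission as the old code did.
 */
static void submit_buffers_plugged(struct buffer_head **bhs, int nr)
{
	struct blk_plug plug;
	int i;

	/* Requests submitted from here on are queued in the per-task plug. */
	blk_start_plug(&plug);

	for (i = 0; i < nr; i++)
		write_dirty_buffer(bhs[i], WRITE_SYNC);

	/* Dispatch the whole batch before anyone sleeps on the buffers. */
	blk_finish_plug(&plug);

	for (i = 0; i < nr; i++)
		wait_on_buffer(bhs[i]);
}

Note that, as in fsync_buffers_list() above, the plug is finished before waiting: blk_finish_plug() pushes the batched requests to the driver, which is why the sleeper can shrink to a bare io_schedule() (sleep_on_buffer) and block_sync_page() can go away entirely.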