author     Jan Kara <jack@suse.cz>                 2005-09-06 15:19:10 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-09-07 16:57:55 -0700
commit     a7662236253374012d364106b6dc9161bd929e2e
tree       59f34811d18fef8195bc3200bdb2684598175f29
parent     e6c9f5c1888097c936334bf9740024520ca47b8e
[PATCH] Make ll_rw_block() wait for buffer lock
Introduce a new ll_rw_block() operation, SWRITE, meaning that the block layer
should wait for the buffer lock and write the buffer out afterwards. Hence the
data in the buffers at the time of the call is guaranteed to be submitted to
the disk.
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
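The following sketch is purely illustrative and not part of the patch: it shows
how a caller that today pairs wait_on_buffer() with ll_rw_block(WRITE, ...) could
switch to SWRITE. The helper name flush_one_buffer() is made up; bh is assumed to
be a buffer_head the caller already references.

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* Hypothetical helper, for illustration only. */
static void flush_one_buffer(struct buffer_head *bh)
{
	/*
	 * Old pattern: wait for any in-flight I/O, then write.  The buffer
	 * can be re-locked between the two calls, in which case WRITE
	 * silently skips it and the current contents may not reach disk:
	 *
	 *	wait_on_buffer(bh);
	 *	ll_rw_block(WRITE, 1, &bh);
	 *
	 * New pattern: SWRITE blocks on the buffer lock inside
	 * ll_rw_block() itself, so the data held by the buffer at the
	 * time of the call is guaranteed to be submitted.
	 */
	ll_rw_block(SWRITE, 1, &bh);
}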
-rw-r--r--   fs/buffer.c         | 30
-rw-r--r--   include/linux/fs.h  |  1
2 files changed, 17 insertions, 14 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index a92b814..1c62203 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -917,8 +917,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * contents - it is a noop if I/O is still in
 				 * flight on potentially older contents.
 				 */
-				wait_on_buffer(bh);
-				ll_rw_block(WRITE, 1, &bh);
+				ll_rw_block(SWRITE, 1, &bh);
 				brelse(bh);
 				spin_lock(lock);
 			}
@@ -2793,21 +2792,22 @@ int submit_bh(int rw, struct buffer_head * bh)
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads,
- * and requests an I/O operation on them, either a %READ or a %WRITE.
- * The third %READA option is described in the documentation for
- * generic_make_request() which ll_rw_block() calls.
+ * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
+ * requests an I/O operation on them, either a %READ or a %WRITE. The third
+ * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
+ * are sent to disk. The fourth %READA option is described in the documentation
+ * for generic_make_request() which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a
- * write request, and any buffer that appears to be up-to-date when doing
- * read request. Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean until
- * the buffer gets unlocked).
+ * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
+ * clean when doing a write request, and any buffer that appears to be
+ * up-to-date when doing read request. Further it marks as clean buffers that
+ * are processed for writing (the buffer cache won't assume that they are
+ * actually clean until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to simple completion handler that marks
 * the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2823,11 +2823,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (test_set_buffer_locked(bh))
+		if (rw == SWRITE)
+			lock_buffer(bh);
+		else if (test_set_buffer_locked(bh))
 			continue;
 
 		get_bh(bh);
-		if (rw == WRITE) {
+		if (rw == WRITE || rw == SWRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				submit_bh(WRITE, bh);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7e1b589..fd93ab7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -69,6 +69,7 @@ extern int dir_notify_enable;
 #define READ 0
 #define WRITE 1
 #define READA 2		/* read-ahead  - don't block if no resources */
+#define SWRITE 3	/* for ll_rw_block() - wait for buffer lock */
 #define SPECIAL 4	/* For non-blockdevice requests in request queue */
 #define READ_SYNC	(READ | (1 << BIO_RW_SYNC))
 #define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNC))
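For orientation, here is the per-buffer decision the patched ll_rw_block() makes,
rewritten as a standalone helper. This is an illustrative sketch, not code from
the patch: submit_one() is a made-up name, and the read-side and fall-through
branches follow the surrounding function body, which the hunk above does not show
in full.

/* Hypothetical helper mirroring one iteration of the ll_rw_block() loop. */
static void submit_one(int rw, struct buffer_head *bh)
{
	if (rw == SWRITE)
		lock_buffer(bh);		/* sleep until the buffer lock is free */
	else if (test_set_buffer_locked(bh))
		return;				/* READ/WRITE/READA: skip buffers we cannot lock */

	get_bh(bh);
	if (rw == WRITE || rw == SWRITE) {
		if (test_clear_buffer_dirty(bh)) {
			bh->b_end_io = end_buffer_write_sync;
			submit_bh(WRITE, bh);	/* SWRITE is still submitted as a plain WRITE */
			return;
		}
	} else {
		if (!buffer_uptodate(bh)) {
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(rw, bh);
			return;
		}
	}
	/* Nothing to submit: release the lock and the reference again. */
	unlock_buffer(bh);
	put_bh(bh);
}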