author     Ingo Molnar <mingo@elte.hu>  2008-07-17 23:57:20 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-07-17 23:57:20 +0200
commit     393d81aa026e19b6ede6f5f11955c97ee62e5df5 (patch)
tree       a1d9511e488e19d41089ff0a736f6ce52a81c6e5 /fs/fs-writeback.c
parent     93a0886e2368eafb9df5e2021fb185195cee88b2 (diff)
parent     5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into xen-64bit
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--  fs/fs-writeback.c  |  22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ae45f77..25adfc3 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -424,8 +424,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
  * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
  * that it can be located for waiting on in __writeback_single_inode().
  *
- * Called under inode_lock.
- *
  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
  * This function assumes that the blockdev superblock's inodes are backed by
  * a variety of queues, so all inodes are searched.  For other superblocks,
@@ -441,11 +439,12 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
  * on the writer throttling path, and we get decent balancing between many
  * throttled threads: we don't want them all piling up on inode_sync_wait.
  */
-static void
-sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
+void generic_sync_sb_inodes(struct super_block *sb,
+				struct writeback_control *wbc)
 {
 	const unsigned long start = jiffies;	/* livelock avoidance */
 
+	spin_lock(&inode_lock);
 	if (!wbc->for_kupdate || list_empty(&sb->s_io))
 		queue_io(sb, wbc->older_than_this);
 
@@ -524,8 +523,16 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
 		if (!list_empty(&sb->s_more_io))
 			wbc->more_io = 1;
 	}
+	spin_unlock(&inode_lock);
 	return;		/* Leave any unwritten inodes on s_io */
 }
+EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
+
+static void sync_sb_inodes(struct super_block *sb,
+				struct writeback_control *wbc)
+{
+	generic_sync_sb_inodes(sb, wbc);
+}
 
 /*
  * Start writeback of dirty pagecache data against all unlocked inodes.
@@ -565,11 +572,8 @@ restart:
 		 * be unmounted by the time it is released.
 		 */
 		if (down_read_trylock(&sb->s_umount)) {
-			if (sb->s_root) {
-				spin_lock(&inode_lock);
+			if (sb->s_root)
 				sync_sb_inodes(sb, wbc);
-				spin_unlock(&inode_lock);
-			}
 			up_read(&sb->s_umount);
 		}
 		spin_lock(&sb_lock);
@@ -607,9 +611,7 @@ void sync_inodes_sb(struct super_block *sb, int wait)
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
 			nr_dirty + nr_unstable;
 	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
-	spin_lock(&inode_lock);
 	sync_sb_inodes(sb, &wbc);
-	spin_unlock(&inode_lock);
 }
 
 /*
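
The change merged here turns sync_sb_inodes() into a thin in-file wrapper and exports generic_sync_sb_inodes(), which now takes and releases inode_lock itself instead of relying on its callers to hold it. The sketch below shows how a filesystem outside fs/fs-writeback.c might drive per-superblock inode writeback through the exported function; the helper name my_fs_sync_inodes() and the writeback_control settings are illustrative assumptions, only the generic_sync_sb_inodes() signature and its internal locking come from this patch. A kupdate-style caller would instead set .for_kupdate, .older_than_this and, to target one queue, .bdi, as described in the comment above generic_sync_sb_inodes().

/*
 * Illustrative sketch only (not part of this commit): a filesystem-side
 * helper that pushes every dirty inode of its super_block through the
 * newly exported generic_sync_sb_inodes().
 */
#include <linux/fs.h>
#include <linux/writeback.h>

static void my_fs_sync_inodes(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
		.nr_to_write	= LONG_MAX,	/* write back everything */
	};

	/*
	 * No spin_lock(&inode_lock) around the call: after this change the
	 * lock is taken and dropped inside generic_sync_sb_inodes() itself.
	 */
	generic_sync_sb_inodes(sb, &wbc);
}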