author		Jan Kara <jack@suse.cz>	2015-07-17 16:24:12 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-07-17 16:39:54 -0700
commit		a2673b6e040663bf16a552f8619e6bde9f4b9acf (patch)
tree		88ccec30ae06241d16a7695b772f0fa5a1c4a053 /fs
parent		3581d458c39bc5e58ceeeaf51796625bc978d2eb (diff)
fsnotify: fix oops in fsnotify_clear_marks_by_group_flags()
fsnotify_clear_marks_by_group_flags() can race with fsnotify_destroy_marks(),
so when fsnotify_destroy_mark_locked() drops mark_mutex, a mark from the list
iterated by fsnotify_clear_marks_by_group_flags() can be freed and we end up
dereferencing freed memory in the loop there.

Fix the problem by keeping mark_mutex held in fsnotify_destroy_mark_locked().
The reason why we drop that mutex is that we need to call the ->freeing_mark()
callback, which may acquire mark_mutex again. To avoid this and similar lock
inversion issues, we move the call to the ->freeing_mark() callback to the
kthread destroying the mark.

Signed-off-by: Jan Kara <jack@suse.cz>
Reported-by: Ashish Sangwan <a.sangwan@samsung.com>
Suggested-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
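For readers unfamiliar with the deferred-teardown pattern the patch switches to, below is a minimal userspace sketch of the same idea in plain C with pthreads. It is an analogy only, not the fsnotify code: every name in it (mark, freeing_cb, destroy_list, destroy_mark_locked, mark_destroy_worker) is a hypothetical stand-in. The teardown path never drops the lock protecting the list it was called under; it merely queues the object, and a separate worker thread later runs the "freeing" callback and frees the object with no list lock held, so the callback may take whatever locks it needs without risking inversion.

/*
 * Minimal sketch (assumption: C11 + pthreads, userspace) of deferring a
 * teardown callback to a worker thread instead of dropping a contended
 * mutex mid-teardown.  All identifiers are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct mark {
	struct mark *next;
	void (*freeing_cb)(struct mark *);	/* plays the role of ->freeing_mark() */
};

static struct mark *destroy_list;		/* marks waiting for the worker */
static pthread_mutex_t destroy_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t destroy_waitq = PTHREAD_COND_INITIALIZER;

/*
 * Called by the teardown path with the caller's list lock still held.
 * Crucially, that lock is never dropped here: the mark is only queued,
 * so a concurrent iterator of the caller's list can never see freed memory.
 */
static void destroy_mark_locked(struct mark *mark)
{
	pthread_mutex_lock(&destroy_lock);
	mark->next = destroy_list;
	destroy_list = mark;
	pthread_mutex_unlock(&destroy_lock);
	pthread_cond_signal(&destroy_waitq);
}

/* Worker thread: runs the callback and frees marks with no list lock held. */
static void *mark_destroy_worker(void *ignored)
{
	(void)ignored;
	for (;;) {
		struct mark *list, *mark;

		pthread_mutex_lock(&destroy_lock);
		while (destroy_list == NULL)
			pthread_cond_wait(&destroy_waitq, &destroy_lock);
		list = destroy_list;		/* splice to a private list */
		destroy_list = NULL;
		pthread_mutex_unlock(&destroy_lock);

		while ((mark = list) != NULL) {
			list = mark->next;
			if (mark->freeing_cb)	/* safe: no other lock held here */
				mark->freeing_cb(mark);
			free(mark);
		}
	}
	return NULL;
}

static void report(struct mark *mark)
{
	printf("freeing mark %p\n", (void *)mark);
}

int main(void)
{
	pthread_t worker;
	struct mark *mark = calloc(1, sizeof(*mark));

	if (!mark)
		return 1;
	mark->freeing_cb = report;
	pthread_create(&worker, NULL, mark_destroy_worker, NULL);
	destroy_mark_locked(mark);
	sleep(1);	/* crude: give the worker time before exiting */
	return 0;
}

Splicing onto a private list keeps the worker's hold on destroy_lock short, and because nothing else touches the private list, the callback and the free run without any lock, mirroring the role of private_destroy_list in the kthread hunk of the patch below.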
Diffstat (limited to 'fs')
-rw-r--r--	fs/notify/mark.c	34
1 file changed, 14 insertions, 20 deletions
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 92e48c7..3e594ce4 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -152,31 +152,15 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
 		BUG();
 	list_del_init(&mark->g_list);
-
 	spin_unlock(&mark->lock);
 	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
 		iput(inode);
-	/* release lock temporarily */
-	mutex_unlock(&group->mark_mutex);
 	spin_lock(&destroy_lock);
 	list_add(&mark->g_list, &destroy_list);
 	spin_unlock(&destroy_lock);
 	wake_up(&destroy_waitq);
-	/*
-	 * We don't necessarily have a ref on mark from caller so the above destroy
-	 * may have actually freed it, unless this group provides a 'freeing_mark'
-	 * function which must be holding a reference.
-	 */
-
-	/*
-	 * Some groups like to know that marks are being freed.  This is a
-	 * callback to the group function to let it know that this mark
-	 * is being freed.
-	 */
-	if (group->ops->freeing_mark)
-		group->ops->freeing_mark(mark, group);
 	/*
 	 * __fsnotify_update_child_dentry_flags(inode);
@@ -191,8 +175,6 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
 	 */
 	atomic_dec(&group->num_marks);
-
-	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 }
 void fsnotify_destroy_mark(struct fsnotify_mark *mark,
@@ -205,7 +187,10 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
 /*
  * Destroy all marks in the given list. The marks must be already detached from
- * the original inode / vfsmount.
+ * the original inode / vfsmount. Note that we can race with
+ * fsnotify_clear_marks_by_group_flags(). However we hold a reference to each
+ * mark so they won't get freed from under us and nobody else touches our
+ * free_list list_head.
  */
 void fsnotify_destroy_marks(struct list_head *to_free)
 {
@@ -406,7 +391,7 @@ struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
 }
 /*
- * clear any marks in a group in which mark->flags & flags is true
+ * Clear any marks in a group in which mark->flags & flags is true.
  */
 void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 					 unsigned int flags)
@@ -460,6 +445,7 @@ static int fsnotify_mark_destroy(void *ignored)
 {
 	struct fsnotify_mark *mark, *next;
 	struct list_head private_destroy_list;
+	struct fsnotify_group *group;
 	for (;;) {
 		spin_lock(&destroy_lock);
@@ -471,6 +457,14 @@ static int fsnotify_mark_destroy(void *ignored)
 		list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
 			list_del_init(&mark->g_list);
+			group = mark->group;
+			/*
+			 * Some groups like to know that marks are being freed.
+			 * This is a callback to the group function to let it
+			 * know that this mark is being freed.
+			 */
+			if (group && group->ops->freeing_mark)
+				group->ops->freeing_mark(mark, group);
 			fsnotify_put_mark(mark);
 		}