-rw-r--r--  fs/inode.c                         1
-rw-r--r--  fs/notify/inode_mark.c            72
-rw-r--r--  include/linux/fsnotify_backend.h   4
3 files changed, 77 insertions, 0 deletions
diff --git a/fs/inode.c b/fs/inode.c
index 54c63ce..ca33701 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -407,6 +407,7 @@ int invalidate_inodes(struct super_block *sb)
 	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	inotify_unmount_inodes(&sb->s_inodes);
+	fsnotify_unmount_inodes(&sb->s_inodes);
 	busy = invalidate_list(&sb->s_inodes, &throw_away);
 	spin_unlock(&inode_lock);
 
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 282150f..0a499d2 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -89,6 +89,7 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/writeback.h> /* for inode_lock */
 
 #include <asm/atomic.h>
 
@@ -351,3 +352,74 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
 
 	return ret;
 }
+
+/**
+ * fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
+ * @list: list of inodes being unmounted (sb->s_inodes)
+ *
+ * Called with inode_lock held, protecting the unmounting super block's list
+ * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
+ * We temporarily drop inode_lock, however, and CAN block.
+ */
+void fsnotify_unmount_inodes(struct list_head *list)
+{
+	struct inode *inode, *next_i, *need_iput = NULL;
+
+	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
+		struct inode *need_iput_tmp;
+
+		/*
+		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
+		 * I_WILL_FREE, or I_NEW which is fine because by that point
+		 * the inode cannot have any associated watches.
+		 */
+		if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
+			continue;
+
+		/*
+		 * If i_count is zero, the inode cannot have any watches and
+		 * doing an __iget/iput with MS_ACTIVE clear would actually
+		 * evict all inodes with zero i_count from icache which is
+		 * unnecessarily violent and may in fact be illegal to do.
+		 */
+		if (!atomic_read(&inode->i_count))
+			continue;
+
+		need_iput_tmp = need_iput;
+		need_iput = NULL;
+
+		/* In case fsnotify_inode_delete() drops a reference. */
+		if (inode != need_iput_tmp)
+			__iget(inode);
+		else
+			need_iput_tmp = NULL;
+
+		/* In case the dropping of a reference would nuke next_i. */
+		if ((&next_i->i_sb_list != list) &&
+		    atomic_read(&next_i->i_count) &&
+		    !(next_i->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))) {
+			__iget(next_i);
+			need_iput = next_i;
+		}
+
+		/*
+		 * We can safely drop inode_lock here because we hold
+		 * references on both inode and next_i. Also no new inodes
+		 * will be added since the umount has begun. Finally,
+		 * iprune_mutex keeps shrink_icache_memory() away.
+		 */
+		spin_unlock(&inode_lock);
+
+		if (need_iput_tmp)
+			iput(need_iput_tmp);
+
+		/* for each watch, send FS_UNMOUNT and then remove it */
+		fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+
+		fsnotify_inode_delete(inode);
+
+		iput(inode);
+
+		spin_lock(&inode_lock);
+	}
+}
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index efdf9e4..d2c0ee3 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -336,6 +336,7 @@ extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry);
 extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
 extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry);
 extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry);
+extern void fsnotify_unmount_inodes(struct list_head *list);
 
 /* put here because inotify does some weird stuff when destroying watches */
 extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
@@ -365,6 +366,9 @@ static inline u32 fsnotify_get_cookie(void)
 	return 0;
 }
 
+static inline void fsnotify_unmount_inodes(struct list_head *list)
+{}
+
 #endif /* CONFIG_FSNOTIFY */
 
 #endif /* __KERNEL __ */
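
For context beyond the patch itself: the FS_UNMOUNT event queued in fsnotify_unmount_inodes() is what backends such as inotify translate into IN_UNMOUNT for userspace watchers. The following is a minimal userspace sketch, not part of this patch, showing how such a watcher observes the unmount; the watched path /mnt/test is only an assumption for illustration.

/* Minimal inotify watcher that reports IN_UNMOUNT (illustrative only). */
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	/* Buffer aligned for struct inotify_event, as in inotify(7). */
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	ssize_t len;
	int fd = inotify_init();

	if (fd < 0 || inotify_add_watch(fd, "/mnt/test", IN_ALL_EVENTS) < 0) {
		perror("inotify");
		return 1;
	}

	/* Block in read(); unmounting the watched filesystem queues IN_UNMOUNT. */
	while ((len = read(fd, buf, sizeof(buf))) > 0) {
		char *p = buf;

		while (p < buf + len) {
			struct inotify_event *ev = (struct inotify_event *)p;

			if (ev->mask & IN_UNMOUNT)
				printf("watched filesystem was unmounted\n");
			p += sizeof(*ev) + ev->len;
		}
	}

	close(fd);
	return 0;
}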