From 606cdcca04a609ed4dfbfe788942de9477da556b Mon Sep 17 00:00:00 2001 From: Niu Yawei Date: Wed, 4 Jun 2014 12:19:12 +0800 Subject: quota: protect Q_GETFMT by dqonoff_mutex dqptr_sem will go away. Protect the Q_GETFMT quotactl with dqonoff_mutex instead. This is also enough to make sure the quota info will not go away while we are looking at it. Signed-off-by: Lai Siyao Signed-off-by: Niu Yawei Signed-off-by: Jan Kara --- fs/quota/quota.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/quota/quota.c b/fs/quota/quota.c index ff3f0b3..7562164 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -79,13 +79,13 @@ static int quota_getfmt(struct super_block *sb, int type, void __user *addr) { __u32 fmt; - down_read(&sb_dqopt(sb)->dqptr_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!sb_has_quota_active(sb, type)) { - up_read(&sb_dqopt(sb)->dqptr_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; - up_read(&sb_dqopt(sb)->dqptr_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); if (copy_to_user(addr, &fmt, sizeof(fmt))) return -EFAULT; return 0; -- cgit v1.1 From 1ea06bec78a128adc995ca32bd906a6c9bb9cf91 Mon Sep 17 00:00:00 2001 From: Niu Yawei Date: Wed, 4 Jun 2014 12:20:30 +0800 Subject: quota: avoid unnecessary dqget()/dqput() calls Avoid unnecessary dqget()/dqput() calls in __dquot_initialize(); they would otherwise introduce global lock contention. Signed-off-by: Lai Siyao Signed-off-by: Niu Yawei Signed-off-by: Jan Kara --- fs/quota/dquot.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 7f30bdc..2517719 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -1402,7 +1402,7 @@ static int dquot_active(const struct inode *inode) */ static void __dquot_initialize(struct inode *inode, int type) { - int cnt; + int cnt, init_needed = 0; struct dquot *got[MAXQUOTAS]; struct super_block *sb = inode->i_sb; qsize_t rsv; @@ -1418,6 +1418,15 @@ static void __dquot_initialize(struct inode *inode, int type) got[cnt] = NULL; if (type != -1 && cnt != type) continue; + /* + * The i_dquot should have been initialized in most cases, + * we check it without locking here to avoid unnecessary + * dqget()/dqput() calls. + */ + if (inode->i_dquot[cnt]) + continue; + init_needed = 1; + switch (cnt) { case USRQUOTA: qid = make_kqid_uid(inode->i_uid); @@ -1429,6 +1438,10 @@ static void __dquot_initialize(struct inode *inode, int type) got[cnt] = dqget(sb, qid); } + /* All required i_dquot has been initialized */ + if (!init_needed) + return; + down_write(&sb_dqopt(sb)->dqptr_sem); if (IS_NOQUOTA(inode)) goto out_err; -- cgit v1.1 From 9eb6463f31cf720deaf0e810cacc403d7720b10c Mon Sep 17 00:00:00 2001 From: Niu Yawei Date: Wed, 4 Jun 2014 12:21:30 +0800 Subject: quota: simplify remove_inode_dquot_ref() Simplify remove_inode_dquot_ref() to make it more obvious that we now keep one reference for each dquot held by inodes. Signed-off-by: Jan Kara Signed-off-by: Niu Yawei Signed-off-by: Jan Kara --- fs/quota/dquot.c | 51 +++++++++++++++++++-------------------------------- 1 file changed, 19 insertions(+), 32 deletions(-) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 2517719..fb2d2e2 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -733,7 +733,6 @@ static struct shrinker dqcache_shrinker = { /* * Put reference to dquot - * NOTE: If you change this function please check whether dqput_blocks() works right...
*/ void dqput(struct dquot *dquot) { @@ -963,46 +962,34 @@ static void add_dquot_ref(struct super_block *sb, int type) } /* - * Return 0 if dqput() won't block. - * (note that 1 doesn't necessarily mean blocking) - */ -static inline int dqput_blocks(struct dquot *dquot) -{ - if (atomic_read(&dquot->dq_count) <= 1) - return 1; - return 0; -} - -/* * Remove references to dquots from inode and add dquot to list for freeing * if we have the last reference to dquot * We can't race with anybody because we hold dqptr_sem for writing... */ -static int remove_inode_dquot_ref(struct inode *inode, int type, - struct list_head *tofree_head) +static void remove_inode_dquot_ref(struct inode *inode, int type, + struct list_head *tofree_head) { struct dquot *dquot = inode->i_dquot[type]; inode->i_dquot[type] = NULL; - if (dquot) { - if (dqput_blocks(dquot)) { -#ifdef CONFIG_QUOTA_DEBUG - if (atomic_read(&dquot->dq_count) != 1) - quota_error(inode->i_sb, "Adding dquot with " - "dq_count %d to dispose list", - atomic_read(&dquot->dq_count)); -#endif - spin_lock(&dq_list_lock); - /* As dquot must have currently users it can't be on - * the free list... */ - list_add(&dquot->dq_free, tofree_head); - spin_unlock(&dq_list_lock); - return 1; - } - else - dqput(dquot); /* We have guaranteed we won't block */ + if (!dquot) + return; + + if (list_empty(&dquot->dq_free)) { + /* + * The inode still has reference to dquot so it can't be in the + * free list + */ + spin_lock(&dq_list_lock); + list_add(&dquot->dq_free, tofree_head); + spin_unlock(&dq_list_lock); + } else { + /* + * Dquot is already in a list to put so we won't drop the last + * reference here. + */ + dqput(dquot); } - return 0; } /* -- cgit v1.1 From b9ba6f94b2382ef832f97122976b73004f714714 Mon Sep 17 00:00:00 2001 From: Niu Yawei Date: Wed, 4 Jun 2014 12:23:19 +0800 Subject: quota: remove dqptr_sem Remove dqptr_sem to make the quota code scalable: accessing inode->i_dquot is now protected by dquot_srcu, and changing inode->i_dquot is serialized by dq_data_lock. Signed-off-by: Lai Siyao Signed-off-by: Niu Yawei Signed-off-by: Jan Kara --- fs/quota/dquot.c | 114 ++++++++++++++++++++++---------------------- fs/super.c | 1 - include/linux/quota.h | 1 - 3 files changed, 49 insertions(+), 67 deletions(-) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index fb2d2e2..f2d0eee 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -96,13 +96,16 @@ * Note that some things (eg. sb pointer, type, id) doesn't change during * the life of the dquot structure and so needn't to be protected by a lock * - * Any operation working on dquots via inode pointers must hold dqptr_sem. If - * operation is just reading pointers from inode (or not using them at all) the - * read lock is enough. If pointers are altered function must hold write lock. + * Operation accessing dquots via inode pointers are protected by dquot_srcu. + * Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and + * synchronize_srcu(&dquot_srcu) is called after clearing pointers from + * inode and before dropping dquot references to avoid use of dquots after + * they are freed. dq_data_lock is used to serialize the pointer setting and + * clearing operations. * Special care needs to be taken about S_NOQUOTA inode flag (marking that * inode is a quota file). Functions adding pointers from inode to dquots have - * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they - * have to do all pointer modifications before dropping dqptr_sem.
This makes + * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they + * have to do all pointer modifications before dropping dq_data_lock. This makes * sure they cannot race with quotaon which first sets S_NOQUOTA flag and * then drops all pointers to dquots from an inode. * @@ -116,21 +119,15 @@ * spinlock to internal buffers before writing. * * Lock ordering (including related VFS locks) is the following: - * dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock > - * dqio_mutex + * dqonoff_mutex > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc. - * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > - * dqptr_sem. But filesystem has to count with the fact that functions such as - * dquot_alloc_space() acquire dqptr_sem and they usually have to be called - * from inside a transaction to keep filesystem consistency after a crash. Also - * filesystems usually want to do some IO on dquot from ->mark_dirty which is - * called with dqptr_sem held. */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); EXPORT_SYMBOL(dq_data_lock); +DEFINE_STATIC_SRCU(dquot_srcu); void __quota_error(struct super_block *sb, const char *func, const char *fmt, ...) @@ -964,7 +961,6 @@ static void add_dquot_ref(struct super_block *sb, int type) /* * Remove references to dquots from inode and add dquot to list for freeing * if we have the last reference to dquot - * We can't race with anybody because we hold dqptr_sem for writing... */ static void remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head) @@ -1024,13 +1020,15 @@ static void remove_dquot_ref(struct super_block *sb, int type, * We have to scan also I_NEW inodes because they can already * have quota pointer initialized. Luckily, we need to touch * only quota pointers and these have separate locking - * (dqptr_sem). + * (dq_data_lock). */ + spin_lock(&dq_data_lock); if (!IS_NOQUOTA(inode)) { if (unlikely(inode_get_rsv_space(inode) > 0)) reserved = 1; remove_inode_dquot_ref(inode, type, tofree_head); } + spin_unlock(&dq_data_lock); } spin_unlock(&inode_sb_list_lock); #ifdef CONFIG_QUOTA_DEBUG @@ -1048,9 +1046,8 @@ static void drop_dquot_ref(struct super_block *sb, int type) LIST_HEAD(tofree_head); if (sb->dq_op) { - down_write(&sb_dqopt(sb)->dqptr_sem); remove_dquot_ref(sb, type, &tofree_head); - up_write(&sb_dqopt(sb)->dqptr_sem); + synchronize_srcu(&dquot_srcu); put_dquot_list(&tofree_head); } } @@ -1381,9 +1378,6 @@ static int dquot_active(const struct inode *inode) /* * Initialize quota pointers in inode * - * We do things in a bit complicated way but by that we avoid calling - * dqget() and thus filesystem callbacks under dqptr_sem. - * * It is better to call this function outside of any transaction as it * might need a lot of space in journal for dquot structure allocation. 
*/ @@ -1394,8 +1388,6 @@ static void __dquot_initialize(struct inode *inode, int type) struct super_block *sb = inode->i_sb; qsize_t rsv; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ if (!dquot_active(inode)) return; @@ -1429,7 +1421,7 @@ static void __dquot_initialize(struct inode *inode, int type) if (!init_needed) return; - down_write(&sb_dqopt(sb)->dqptr_sem); + spin_lock(&dq_data_lock); if (IS_NOQUOTA(inode)) goto out_err; for (cnt = 0; cnt < MAXQUOTAS; cnt++) { @@ -1449,15 +1441,12 @@ static void __dquot_initialize(struct inode *inode, int type) * did a write before quota was turned on */ rsv = inode_get_rsv_space(inode); - if (unlikely(rsv)) { - spin_lock(&dq_data_lock); + if (unlikely(rsv)) dquot_resv_space(inode->i_dquot[cnt], rsv); - spin_unlock(&dq_data_lock); - } } } out_err: - up_write(&sb_dqopt(sb)->dqptr_sem); + spin_unlock(&dq_data_lock); /* Drop unused references */ dqput_all(got); } @@ -1469,19 +1458,24 @@ void dquot_initialize(struct inode *inode) EXPORT_SYMBOL(dquot_initialize); /* - * Release all quotas referenced by inode + * Release all quotas referenced by inode. + * + * This function only be called on inode free or converting + * a file to quota file, no other users for the i_dquot in + * both cases, so we needn't call synchronize_srcu() after + * clearing i_dquot. */ static void __dquot_drop(struct inode *inode) { int cnt; struct dquot *put[MAXQUOTAS]; - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { put[cnt] = inode->i_dquot[cnt]; inode->i_dquot[cnt] = NULL; } - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + spin_unlock(&dq_data_lock); dqput_all(put); } @@ -1599,15 +1593,11 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve) */ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) { - int cnt, ret = 0; + int cnt, ret = 0, index; struct dquot_warn warn[MAXQUOTAS]; struct dquot **dquots = inode->i_dquot; int reserve = flags & DQUOT_SPACE_RESERVE; - /* - * First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex - */ if (!dquot_active(inode)) { inode_incr_space(inode, number, reserve); goto out; @@ -1616,7 +1606,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) for (cnt = 0; cnt < MAXQUOTAS; cnt++) warn[cnt].w_type = QUOTA_NL_NOWARN; - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (!dquots[cnt]) @@ -1643,7 +1633,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) goto out_flush_warn; mark_all_dquot_dirty(dquots); out_flush_warn: - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); out: return ret; @@ -1655,17 +1645,16 @@ EXPORT_SYMBOL(__dquot_alloc_space); */ int dquot_alloc_inode(const struct inode *inode) { - int cnt, ret = 0; + int cnt, ret = 0, index; struct dquot_warn warn[MAXQUOTAS]; struct dquot * const *dquots = inode->i_dquot; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ if (!dquot_active(inode)) return 0; for (cnt = 0; cnt < MAXQUOTAS; cnt++) warn[cnt].w_type = QUOTA_NL_NOWARN; - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + + index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); for 
(cnt = 0; cnt < MAXQUOTAS; cnt++) { if (!dquots[cnt]) @@ -1685,7 +1674,7 @@ warn_put_all: spin_unlock(&dq_data_lock); if (ret == 0) mark_all_dquot_dirty(dquots); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); return ret; } @@ -1696,14 +1685,14 @@ EXPORT_SYMBOL(dquot_alloc_inode); */ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) { - int cnt; + int cnt, index; if (!dquot_active(inode)) { inode_claim_rsv_space(inode, number); return 0; } - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { @@ -1715,7 +1704,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) inode_claim_rsv_space(inode, number); spin_unlock(&dq_data_lock); mark_all_dquot_dirty(inode->i_dquot); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + srcu_read_unlock(&dquot_srcu, index); return 0; } EXPORT_SYMBOL(dquot_claim_space_nodirty); @@ -1725,14 +1714,14 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty); */ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) { - int cnt; + int cnt, index; if (!dquot_active(inode)) { inode_reclaim_rsv_space(inode, number); return; } - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { @@ -1744,7 +1733,7 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) inode_reclaim_rsv_space(inode, number); spin_unlock(&dq_data_lock); mark_all_dquot_dirty(inode->i_dquot); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + srcu_read_unlock(&dquot_srcu, index); return; } EXPORT_SYMBOL(dquot_reclaim_space_nodirty); @@ -1757,16 +1746,14 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) unsigned int cnt; struct dquot_warn warn[MAXQUOTAS]; struct dquot **dquots = inode->i_dquot; - int reserve = flags & DQUOT_SPACE_RESERVE; + int reserve = flags & DQUOT_SPACE_RESERVE, index; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ if (!dquot_active(inode)) { inode_decr_space(inode, number, reserve); return; } - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { int wtype; @@ -1789,7 +1776,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) goto out_unlock; mark_all_dquot_dirty(dquots); out_unlock: - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); } EXPORT_SYMBOL(__dquot_free_space); @@ -1802,13 +1789,12 @@ void dquot_free_inode(const struct inode *inode) unsigned int cnt; struct dquot_warn warn[MAXQUOTAS]; struct dquot * const *dquots = inode->i_dquot; + int index; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ if (!dquot_active(inode)) return; - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + index = srcu_read_lock(&dquot_srcu); spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { int wtype; @@ -1823,7 +1809,7 @@ void dquot_free_inode(const struct inode *inode) } spin_unlock(&dq_data_lock); mark_all_dquot_dirty(dquots); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); 
} EXPORT_SYMBOL(dquot_free_inode); @@ -1837,6 +1823,8 @@ EXPORT_SYMBOL(dquot_free_inode); * This operation can block, but only after everything is updated * A transaction must be started when entering this function. * + * We are holding reference on transfer_from & transfer_to, no need to + * protect them by srcu_read_lock(). */ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) { @@ -1849,8 +1837,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) struct dquot_warn warn_from_inodes[MAXQUOTAS]; struct dquot_warn warn_from_space[MAXQUOTAS]; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return 0; /* Initialize the arrays */ @@ -1859,12 +1845,12 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN; warn_from_space[cnt].w_type = QUOTA_NL_NOWARN; } - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + + spin_lock(&dq_data_lock); if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + spin_unlock(&dq_data_lock); return 0; } - spin_lock(&dq_data_lock); cur_space = inode_get_bytes(inode); rsv_space = inode_get_rsv_space(inode); space = cur_space + rsv_space; @@ -1918,7 +1904,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) inode->i_dquot[cnt] = transfer_to[cnt]; } spin_unlock(&dq_data_lock); - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); mark_all_dquot_dirty(transfer_from); mark_all_dquot_dirty(transfer_to); @@ -1932,7 +1917,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) return 0; over_quota: spin_unlock(&dq_data_lock); - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); flush_warnings(warn_to); return ret; } diff --git a/fs/super.c b/fs/super.c index d20d5b1..872b26b 100644 --- a/fs/super.c +++ b/fs/super.c @@ -218,7 +218,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); mutex_init(&s->s_dquot.dqio_mutex); mutex_init(&s->s_dquot.dqonoff_mutex); - init_rwsem(&s->s_dquot.dqptr_sem); s->s_maxbytes = MAX_NON_LFS; s->s_op = &default_op; s->s_time_gran = 1000000000; diff --git a/include/linux/quota.h b/include/linux/quota.h index 0f3c5d3..80d345a 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -390,7 +390,6 @@ struct quota_info { unsigned int flags; /* Flags for diskquotas on this device */ struct mutex dqio_mutex; /* lock device while I/O in progress */ struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */ - struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */ struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ -- cgit v1.1 From e625b310edcb7eb43418ff18157561dacf6128f5 Mon Sep 17 00:00:00 2001 From: Himangi Saraogi Date: Tue, 10 Jun 2014 20:11:18 +0530 Subject: fs/ext2/super.c: Drop memory allocation cast Drop cast on the result of kmem_cache_alloc. 
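For illustration only (this sketch is not part of the patch; struct foo and foo_cachep are hypothetical names): kmem_cache_alloc() returns void *, which C converts implicitly to any object pointer type, so the cast adds nothing and can even hide a missing prototype.

        #include <linux/slab.h>

        /* Sketch: 'struct foo' and 'foo_cachep' are hypothetical. */
        static struct foo *foo_alloc(void)
        {
                /*
                 * Before the cleanup the call read
                 *     f = (struct foo *)kmem_cache_alloc(foo_cachep, GFP_KERNEL);
                 * The cast is redundant: the void * result converts implicitly.
                 */
                struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

                if (!f)
                        return NULL;
                return f;
        }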
The semantic patch that makes this change is as follows: // <smpl> @@ type T; @@ - (T *) (\(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\)(...)) // </smpl> Signed-off-by: Himangi Saraogi Acked-by: Julia Lawall Signed-off-by: Jan Kara --- fs/ext2/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 3750031..b88edc0 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -161,7 +161,7 @@ static struct kmem_cache * ext2_inode_cachep; static struct inode *ext2_alloc_inode(struct super_block *sb) { struct ext2_inode_info *ei; - ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); + ei = kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); if (!ei) return NULL; ei->i_block_alloc_info = NULL; -- cgit v1.1 From e973606cc27527324a34127a3a8cc85bbb25a391 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Wed, 18 Jun 2014 19:38:24 +0200 Subject: udf: use linux/uaccess.h Fix checkpatch warning: WARNING: Use #include <linux/uaccess.h> instead of <asm/uaccess.h> Cc: Jan Kara Signed-off-by: Fabian Frederick Signed-off-by: Jan Kara --- fs/udf/file.c | 2 +- fs/udf/lowlevel.c | 2 +- fs/udf/super.c | 2 +- fs/udf/symlink.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/udf/file.c b/fs/udf/file.c index d80738f..72b8dae 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -27,7 +27,7 @@ #include "udfdecl.h" #include -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include #include /* memset */ #include diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c index 6583fe9..6ad5a45 100644 --- a/fs/udf/lowlevel.c +++ b/fs/udf/lowlevel.c @@ -21,7 +21,7 @@ #include #include -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "udf_sb.h" diff --git a/fs/udf/super.c b/fs/udf/super.c index 3286db0..813da94 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -63,7 +63,7 @@ #include "udf_i.h" #include -#include <asm/uaccess.h> +#include <linux/uaccess.h> #define VDS_POS_PRIMARY_VOL_DESC 0 #define VDS_POS_UNALLOC_SPACE_DESC 1 diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index d7c6dbe..6fb7945 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c @@ -20,7 +20,7 @@ */ #include "udfdecl.h" -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include #include #include -- cgit v1.1 From 2c15ac5bdb0009645f1f5962086021daa6b52d3f Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Fri, 4 Jul 2014 21:44:25 +0200 Subject: fs/quota: kernel-doc warning fixes type and id were removed and qid added to quota_send_warning in commit 431f19744d15 ("userns: Convert quota netlink aka quota_send_warning") Cc: Jan Kara Cc: Andrew Morton Signed-off-by: Fabian Frederick Signed-off-by: Jan Kara --- fs/quota/kqid.c | 2 +- fs/quota/netlink.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/quota/kqid.c b/fs/quota/kqid.c index 2f97b0e..ebc5e62 100644 --- a/fs/quota/kqid.c +++ b/fs/quota/kqid.c @@ -55,7 +55,7 @@ EXPORT_SYMBOL(qid_lt); /** * from_kqid - Create a qid from a kqid user-namespace pair. * @targ: The user namespace we want a qid in. - * @kuid: The kernel internal quota identifier to start with. + * @kqid: The kernel internal quota identifier to start with. * * Map @kqid into the user-namespace specified by @targ and * return the resulting qid. diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c index 72d2917..bb2869f 100644 --- a/fs/quota/netlink.c +++ b/fs/quota/netlink.c @@ -32,8 +32,7 @@ static struct genl_family quota_genl_family = { /** * quota_send_warning - Send warning to userspace about exceeded quota - * @type: The quota type: USRQQUOTA, GRPQUOTA,...
- * @id: The user or group id of the quota that was exceeded + * @qid: The kernel internal quota identifier. * @dev: The device on which the fs is mounted (sb->s_dev) * @warntype: The type of the warning: QUOTA_NL_... * -- cgit v1.1 From c7ff48212d3ede34aa41842929a5b3ebf8f5ca4d Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 9 Jul 2014 15:35:30 +0300 Subject: fs/udf: re-use hex_asc_upper_{hi,lo} macros This patch cleans up udf_translate_to_linux() a bit by using globally defined macros instead of custom code. We could use sprintf(buf, "%04X", ...) there as well, but this one is faster. Signed-off-by: Andy Shevchenko Signed-off-by: Jan Kara --- fs/udf/unicode.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c index 44b815e..afd470e 100644 --- a/fs/udf/unicode.c +++ b/fs/udf/unicode.c @@ -412,7 +412,6 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int extIndex = 0, newExtIndex = 0, hasExt = 0; unsigned short valueCRC; uint8_t curr; - const uint8_t hexChar[] = "0123456789ABCDEF"; if (udfName[0] == '.' && (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) { @@ -477,10 +476,10 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, newIndex = 250; newName[newIndex++] = CRC_MARK; valueCRC = crc_itu_t(0, fidName, fidNameLen); - newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12]; - newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8]; - newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4]; - newName[newIndex++] = hexChar[(valueCRC & 0x000f)]; + newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8); + newName[newIndex++] = hex_asc_upper_lo(valueCRC >> 8); + newName[newIndex++] = hex_asc_upper_hi(valueCRC); + newName[newIndex++] = hex_asc_upper_lo(valueCRC); if (hasExt) { newName[newIndex++] = EXT_MARK; -- cgit v1.1 From 3f1be4f9c9dc926c1b96f14f88e91b8b0d1f88fd Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Tue, 15 Jul 2014 09:38:51 +0800 Subject: udf: avoid redundant memcpy when writing data in ICB Valid data within i_size in the page cache will be copied to the ICB cache when we write back the page by invoking udf_adinicb_writepage, so the copy in udf_adinicb_write_end is redundant. After we remove the copy, it's better to use simple_write_end directly in udf_adinicb_aops instead of udf_adinicb_write_end.
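As background for the change (a hedged sketch, not UDF code): simple_write_end() from fs/libfs.c only commits the page, marking it up to date and dirty and updating i_size, so it fits any address_space whose writepage already copies the data back to its final location, as udf_adinicb_writepage does into the ICB. The myfs_* names below are placeholders.

        #include <linux/fs.h>

        /* Placeholder callbacks; a real filesystem supplies these. */
        extern int myfs_inline_readpage(struct file *, struct page *);
        extern int myfs_inline_writepage(struct page *,
                                         struct writeback_control *);
        extern int myfs_inline_write_begin(struct file *,
                                           struct address_space *, loff_t,
                                           unsigned, unsigned,
                                           struct page **, void **);

        const struct address_space_operations myfs_inline_aops = {
                .readpage    = myfs_inline_readpage,
                /* copies the page into the in-inode area at writeback time */
                .writepage   = myfs_inline_writepage,
                .write_begin = myfs_inline_write_begin,
                /* generic commit: mark the page dirty and update i_size */
                .write_end   = simple_write_end,
        };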
Signed-off-by: Chao Yu Signed-off-by: Jan Kara --- fs/udf/file.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/fs/udf/file.c b/fs/udf/file.c index 72b8dae..86c6743 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -100,24 +100,6 @@ static int udf_adinicb_write_begin(struct file *file, return 0; } -static int udf_adinicb_write_end(struct file *file, - struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata) -{ - struct inode *inode = mapping->host; - unsigned offset = pos & (PAGE_CACHE_SIZE - 1); - char *kaddr; - struct udf_inode_info *iinfo = UDF_I(inode); - - kaddr = kmap_atomic(page); - memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset, - kaddr + offset, copied); - kunmap_atomic(kaddr); - - return simple_write_end(file, mapping, pos, len, copied, page, fsdata); -} - static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset) @@ -130,7 +112,7 @@ const struct address_space_operations udf_adinicb_aops = { .readpage = udf_adinicb_readpage, .writepage = udf_adinicb_writepage, .write_begin = udf_adinicb_write_begin, - .write_end = udf_adinicb_write_end, + .write_end = simple_write_end, .direct_IO = udf_adinicb_direct_IO, }; -- cgit v1.1 From 27d0e5bc85f3341b9ba66f0c23627cf9d7538c9d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 4 Aug 2014 19:51:47 -0400 Subject: reiserfs: fix corruption introduced by balance_leaf refactor Commits f1f007c308e (reiserfs: balance_leaf refactor, pull out balance_leaf_insert_left) and cf22df182bf (reiserfs: balance_leaf refactor, pull out balance_leaf_paste_left) missed that the `body' pointer was getting repositioned. Subsequent users of the pointer would expect it to be repositioned, and as a result, parts of the tree would get overwritten. The most commonly observed corruption is indirect block pointers being overwritten. Since the body value isn't actually used anymore in the called routines, we can pass back the offset by which it should be shifted. We constify the body and ih pointers in balance_leaf() as a mostly-free preventative measure.
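The bug class is easy to reproduce outside reiserfs (a hedged, self-contained illustration of the fix's convention, not reiserfs code): once a helper takes a buffer pointer by value, any repositioning it does is lost to the caller unless it is reported back.

        #include <stddef.h>

        /*
         * Consume a prefix of 'body' (details elided) and return how far
         * the caller must shift its own copy of the pointer.
         */
        static size_t consume_prefix(const char *const body, size_t len)
        {
                /* ... move the first 'len' bytes to the left neighbor ... */
                return len;
        }

        static void caller(const char *body, size_t shift)
        {
                /*
                 * Reposition in the caller, just as balance_leaf() now does
                 * with: body += balance_leaf_left(tb, ih, body, flag);
                 */
                body += consume_prefix(body, shift);
                /* subsequent users of 'body' now see the shifted position */
        }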
Cc: # 3.16 Reported-and-tested-by: Jeff Chua Signed-off-by: Jeff Mahoney Signed-off-by: Jan Kara --- fs/reiserfs/do_balan.c | 111 ++++++++++++++++++++++++++++--------------------- fs/reiserfs/lbalance.c | 5 ++- fs/reiserfs/reiserfs.h | 9 ++-- 3 files changed, 71 insertions(+), 54 deletions(-) diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c index 54fdf19..4d5e529 100644 --- a/fs/reiserfs/do_balan.c +++ b/fs/reiserfs/do_balan.c @@ -286,12 +286,14 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag) return 0; } -static void balance_leaf_insert_left(struct tree_balance *tb, - struct item_head *ih, const char *body) +static unsigned int balance_leaf_insert_left(struct tree_balance *tb, + struct item_head *const ih, + const char * const body) { int ret; struct buffer_info bi; int n = B_NR_ITEMS(tb->L[0]); + unsigned body_shift_bytes = 0; if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { /* part of new item falls into L[0] */ @@ -329,7 +331,7 @@ static void balance_leaf_insert_left(struct tree_balance *tb, put_ih_item_len(ih, new_item_len); if (tb->lbytes > tb->zeroes_num) { - body += (tb->lbytes - tb->zeroes_num); + body_shift_bytes = tb->lbytes - tb->zeroes_num; tb->zeroes_num = 0; } else tb->zeroes_num -= tb->lbytes; @@ -349,11 +351,12 @@ static void balance_leaf_insert_left(struct tree_balance *tb, tb->insert_size[0] = 0; tb->zeroes_num = 0; } + return body_shift_bytes; } static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb, - struct item_head *ih, - const char *body) + struct item_head * const ih, + const char * const body) { int n = B_NR_ITEMS(tb->L[0]); struct buffer_info bi; @@ -413,17 +416,18 @@ static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb, tb->pos_in_item -= tb->lbytes; } -static void balance_leaf_paste_left_shift(struct tree_balance *tb, - struct item_head *ih, - const char *body) +static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb, + struct item_head * const ih, + const char * const body) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); int n = B_NR_ITEMS(tb->L[0]); struct buffer_info bi; + int body_shift_bytes = 0; if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) { balance_leaf_paste_left_shift_dirent(tb, ih, body); - return; + return 0; } RFALSE(tb->lbytes <= 0, @@ -497,7 +501,7 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb, * insert_size[0] */ if (l_n > tb->zeroes_num) { - body += (l_n - tb->zeroes_num); + body_shift_bytes = l_n - tb->zeroes_num; tb->zeroes_num = 0; } else tb->zeroes_num -= l_n; @@ -526,13 +530,14 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb, */ leaf_shift_left(tb, tb->lnum[0], tb->lbytes); } + return body_shift_bytes; } /* appended item will be in L[0] in whole */ static void balance_leaf_paste_left_whole(struct tree_balance *tb, - struct item_head *ih, - const char *body) + struct item_head * const ih, + const char * const body) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); int n = B_NR_ITEMS(tb->L[0]); @@ -584,39 +589,44 @@ static void balance_leaf_paste_left_whole(struct tree_balance *tb, tb->zeroes_num = 0; } -static void balance_leaf_paste_left(struct tree_balance *tb, - struct item_head *ih, const char *body) +static unsigned int balance_leaf_paste_left(struct tree_balance *tb, + struct item_head * const ih, + const char * const body) { /* we must shift the part of the appended item */ if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) - 
balance_leaf_paste_left_shift(tb, ih, body); + return balance_leaf_paste_left_shift(tb, ih, body); else balance_leaf_paste_left_whole(tb, ih, body); + return 0; } /* Shift lnum[0] items from S[0] to the left neighbor L[0] */ -static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih, - const char *body, int flag) +static unsigned int balance_leaf_left(struct tree_balance *tb, + struct item_head * const ih, + const char * const body, int flag) { if (tb->lnum[0] <= 0) - return; + return 0; /* new item or it part falls to L[0], shift it too */ if (tb->item_pos < tb->lnum[0]) { BUG_ON(flag != M_INSERT && flag != M_PASTE); if (flag == M_INSERT) - balance_leaf_insert_left(tb, ih, body); + return balance_leaf_insert_left(tb, ih, body); else /* M_PASTE */ - balance_leaf_paste_left(tb, ih, body); + return balance_leaf_paste_left(tb, ih, body); } else /* new item doesn't fall into L[0] */ leaf_shift_left(tb, tb->lnum[0], tb->lbytes); + return 0; } static void balance_leaf_insert_right(struct tree_balance *tb, - struct item_head *ih, const char *body) + struct item_head * const ih, + const char * const body) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); @@ -704,7 +714,8 @@ static void balance_leaf_insert_right(struct tree_balance *tb, static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb, - struct item_head *ih, const char *body) + struct item_head * const ih, + const char * const body) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); struct buffer_info bi; @@ -754,7 +765,8 @@ static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb, } static void balance_leaf_paste_right_shift(struct tree_balance *tb, - struct item_head *ih, const char *body) + struct item_head * const ih, + const char * const body) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); int n_shift, n_rem, r_zeroes_number, version; @@ -831,7 +843,8 @@ static void balance_leaf_paste_right_shift(struct tree_balance *tb, } static void balance_leaf_paste_right_whole(struct tree_balance *tb, - struct item_head *ih, const char *body) + struct item_head * const ih, + const char * const body) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); int n = B_NR_ITEMS(tbS0); @@ -874,7 +887,8 @@ static void balance_leaf_paste_right_whole(struct tree_balance *tb, } static void balance_leaf_paste_right(struct tree_balance *tb, - struct item_head *ih, const char *body) + struct item_head * const ih, + const char * const body) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); int n = B_NR_ITEMS(tbS0); @@ -896,8 +910,9 @@ static void balance_leaf_paste_right(struct tree_balance *tb, } /* shift rnum[0] items from S[0] to the right neighbor R[0] */ -static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih, - const char *body, int flag) +static void balance_leaf_right(struct tree_balance *tb, + struct item_head * const ih, + const char * const body, int flag) { if (tb->rnum[0] <= 0) return; @@ -911,8 +926,8 @@ static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih, } static void balance_leaf_new_nodes_insert(struct tree_balance *tb, - struct item_head *ih, - const char *body, + struct item_head * const ih, + const char * const body, struct item_head *insert_key, struct buffer_head **insert_ptr, int i) @@ -1003,8 +1018,8 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb, /* we append to directory item */ static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb, - struct 
item_head *ih, - const char *body, + struct item_head * const ih, + const char * const body, struct item_head *insert_key, struct buffer_head **insert_ptr, int i) @@ -1058,8 +1073,8 @@ static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb, } static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb, - struct item_head *ih, - const char *body, + struct item_head * const ih, + const char * const body, struct item_head *insert_key, struct buffer_head **insert_ptr, int i) @@ -1131,8 +1146,8 @@ static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb, } static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb, - struct item_head *ih, - const char *body, + struct item_head * const ih, + const char * const body, struct item_head *insert_key, struct buffer_head **insert_ptr, int i) @@ -1184,8 +1199,8 @@ static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb, } static void balance_leaf_new_nodes_paste(struct tree_balance *tb, - struct item_head *ih, - const char *body, + struct item_head * const ih, + const char * const body, struct item_head *insert_key, struct buffer_head **insert_ptr, int i) @@ -1214,8 +1229,8 @@ static void balance_leaf_new_nodes_paste(struct tree_balance *tb, /* Fill new nodes that appear in place of S[0] */ static void balance_leaf_new_nodes(struct tree_balance *tb, - struct item_head *ih, - const char *body, + struct item_head * const ih, + const char * const body, struct item_head *insert_key, struct buffer_head **insert_ptr, int flag) @@ -1254,8 +1269,8 @@ static void balance_leaf_new_nodes(struct tree_balance *tb, } static void balance_leaf_finish_node_insert(struct tree_balance *tb, - struct item_head *ih, - const char *body) + struct item_head * const ih, + const char * const body) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); struct buffer_info bi; @@ -1271,8 +1286,8 @@ static void balance_leaf_finish_node_insert(struct tree_balance *tb, } static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb, - struct item_head *ih, - const char *body) + struct item_head * const ih, + const char * const body) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); struct item_head *pasted = item_head(tbS0, tb->item_pos); @@ -1305,8 +1320,8 @@ static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb, } static void balance_leaf_finish_node_paste(struct tree_balance *tb, - struct item_head *ih, - const char *body) + struct item_head * const ih, + const char * const body) { struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); struct buffer_info bi; @@ -1349,8 +1364,8 @@ static void balance_leaf_finish_node_paste(struct tree_balance *tb, * of the affected item which remains in S */ static void balance_leaf_finish_node(struct tree_balance *tb, - struct item_head *ih, - const char *body, int flag) + struct item_head * const ih, + const char * const body, int flag) { /* if we must insert or append into buffer S[0] */ if (0 <= tb->item_pos && tb->item_pos < tb->s0num) { @@ -1402,7 +1417,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, && is_indirect_le_ih(item_head(tbS0, tb->item_pos))) tb->pos_in_item *= UNFM_P_SIZE; - balance_leaf_left(tb, ih, body, flag); + body += balance_leaf_left(tb, ih, body, flag); /* tb->lnum[0] > 0 */ /* Calculate new item position */ diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c index d6744c8..3a74d15 100644 --- a/fs/reiserfs/lbalance.c +++ b/fs/reiserfs/lbalance.c @@ -899,8 +899,9 @@ void 
leaf_delete_items(struct buffer_info *cur_bi, int last_first, /* insert item into the leaf node in position before */ void leaf_insert_into_buf(struct buffer_info *bi, int before, - struct item_head *inserted_item_ih, - const char *inserted_item_body, int zeros_number) + struct item_head * const inserted_item_ih, + const char * const inserted_item_body, + int zeros_number) { struct buffer_head *bh = bi->bi_bh; int nr, free_space; diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index bf53888..735c2c2 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -3216,11 +3216,12 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes); void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first, int del_num, int del_bytes); void leaf_insert_into_buf(struct buffer_info *bi, int before, - struct item_head *inserted_item_ih, - const char *inserted_item_body, int zeros_number); -void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num, - int pos_in_item, int paste_size, const char *body, + struct item_head * const inserted_item_ih, + const char * const inserted_item_body, int zeros_number); +void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num, + int pos_in_item, int paste_size, + const char * const body, int zeros_number); void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num, int pos_in_item, int cut_size); void leaf_paste_entries(struct buffer_info *bi, int item_num, int before, -- cgit v1.1 From 01777836c87081e4f68c4a43c9abe6114805f91e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 6 Aug 2014 19:43:56 +0200 Subject: reiserfs: Fix use after free in journal teardown If do_journal_release() races with do_journal_end() which requeues delayed works for transaction flushing, we can leave work items for flushing outstanding transactions queued while freeing them. That results in use after free and possible crash in run_timers_softirq(). Fix the problem by not requeueing works if superblock is being shut down (MS_ACTIVE not set) and using cancel_delayed_work_sync() in do_journal_release(). CC: stable@vger.kernel.org Signed-off-by: Jan Kara --- fs/reiserfs/journal.c | 22 ++++++++++++++++------ fs/reiserfs/super.c | 6 +++++- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index e8870de..a88b1b3 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -1947,8 +1947,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, } } - /* wait for all commits to finish */ - cancel_delayed_work(&SB_JOURNAL(sb)->j_work); /* * We must release the write lock here because @@ -1956,8 +1954,14 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, */ reiserfs_write_unlock(sb); + /* + * Cancel flushing of old commits. Note that neither of these works + * will be requeued because superblock is being shutdown and doesn't + * have MS_ACTIVE set. 
+ */ cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work); - flush_workqueue(REISERFS_SB(sb)->commit_wq); + /* wait for all commits to finish */ + cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work); free_journal_ram(sb); @@ -4292,9 +4296,15 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags) if (flush) { flush_commit_list(sb, jl, 1); flush_journal_list(sb, jl, 1); - } else if (!(jl->j_state & LIST_COMMIT_PENDING)) - queue_delayed_work(REISERFS_SB(sb)->commit_wq, - &journal->j_work, HZ / 10); + } else if (!(jl->j_state & LIST_COMMIT_PENDING)) { + /* + * Avoid queueing work when sb is being shut down. Transaction + * will be flushed on journal shutdown. + */ + if (sb->s_flags & MS_ACTIVE) + queue_delayed_work(REISERFS_SB(sb)->commit_wq, + &journal->j_work, HZ / 10); + } /* * if the next transaction has any chance of wrapping, flush diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index a392cef..5fd8f57 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -100,7 +100,11 @@ void reiserfs_schedule_old_flush(struct super_block *s) struct reiserfs_sb_info *sbi = REISERFS_SB(s); unsigned long delay; - if (s->s_flags & MS_RDONLY) + /* + * Avoid scheduling flush when sb is being shut down. It can race + * with journal shutdown and free still queued delayed work. + */ + if (s->s_flags & MS_RDONLY || !(s->s_flags & MS_ACTIVE)) return; spin_lock(&sbi->old_work_lock); -- cgit v1.1
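The two-sided pattern in this last fix generalizes to any delayed work that can requeue itself: the queueing side must stop requeueing once the object is on its way down, and the teardown side must cancel synchronously so no handler can still be running, or requeued, when the backing memory is freed. A hedged sketch with placeholder demo_* names (MS_ACTIVE, system_wq, and the workqueue calls are the 3.16-era kernel APIs):

        #include <linux/fs.h>
        #include <linux/workqueue.h>

        static void demo_schedule_flush(struct super_block *sb,
                                        struct delayed_work *dw)
        {
                /* Never requeue while the superblock is shutting down. */
                if (!(sb->s_flags & MS_ACTIVE))
                        return;
                queue_delayed_work(system_wq, dw, HZ / 10);
        }

        static void demo_teardown(struct delayed_work *dw)
        {
                /*
                 * cancel_delayed_work() can return while the handler is
                 * still running; the _sync variant waits, so the structure
                 * embedding 'dw' can be freed safely afterwards.
                 */
                cancel_delayed_work_sync(dw);
        }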