author	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 10:00:14 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 10:00:14 -0700
commit	9a7259d5c8978bbeb5fdcf64b168f8470d8208a6 (patch)
tree	5c255d4b18622de06c2637c0c4069a384e99466d /fs
parent	e9c0f1529c9022afbab16a442382aa9a84a79c41 (diff)
parent	e703c206135acb458adb705ec44bcc5d2615b37d (diff)
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs
Pull ext3, UDF, and quota fixes from Jan Kara:
"A couple of ext3 & UDF fixes and also one improvement in quota
locking."
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
ext3: fix start and len arguments handling in ext3_trim_fs()
udf: Fix deadlock in udf_release_file()
udf: Fix file entry logicalBlocksRecorded
udf: Fix handling of i_blocks
quota: Make quota code not call tty layer with dqptr_sem held
udf: Init/maintain file entry checkpoint field
ext3: Update ctime in ext3_splice_branch() only when needed
ext3: Don't call dquot_free_block() if we don't update anything
udf: Remove unnecessary OOM messages
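The main quota change in this pull ("Make quota code not call tty layer with dqptr_sem held") is a locking pattern rather than a behaviour change: while dqptr_sem and dq_data_lock are held, the code only records which warning should be issued (prepare_warning() filling a local struct dquot_warn array), and the tty/netlink reporting (flush_warnings()) runs after the locks have been dropped, so the reporting path may take its own locks or sleep. Below is a minimal, self-contained sketch of that pattern; every name in it is invented for the illustration and it is not the kernel code itself.

/*
 * Sketch of "prepare under the lock, report after unlocking".
 * All names are made up for the example.
 */
#include <pthread.h>
#include <stdio.h>

#define MAXQUOTAS 3
#define NOWARN    0

struct warn_entry {              /* stands in for struct dquot_warn */
	int type;                /* NOWARN or a warning code */
	int id;                  /* which quota triggered it */
};

static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
static long usage[MAXQUOTAS] = { 90, 10, 100 };
static const long limit = 95;

/* Called only after data_lock has been released. */
static void flush_warnings(const struct warn_entry *warn)
{
	for (int i = 0; i < MAXQUOTAS; i++)
		if (warn[i].type != NOWARN)
			printf("quota %d over limit (warning %d)\n",
			       warn[i].id, warn[i].type);
}

static void charge(long nr)
{
	struct warn_entry warn[MAXQUOTAS];

	pthread_mutex_lock(&data_lock);
	for (int i = 0; i < MAXQUOTAS; i++) {
		warn[i].type = NOWARN;
		usage[i] += nr;
		if (usage[i] > limit) {  /* prepare, do not report yet */
			warn[i].type = 1;
			warn[i].id = i;
		}
	}
	pthread_mutex_unlock(&data_lock);

	flush_warnings(warn);            /* report outside the lock */
}

int main(void)
{
	charge(7);
	return 0;
}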
Diffstat (limited to 'fs')
-rw-r--r--	fs/ext3/balloc.c	84
-rw-r--r--	fs/ext3/inode.c	9
-rw-r--r--	fs/quota/dquot.c	189
-rw-r--r--	fs/udf/balloc.c	84
-rw-r--r--	fs/udf/ialloc.c	1
-rw-r--r--	fs/udf/inode.c	20
-rw-r--r--	fs/udf/super.c	5
-rw-r--r--	fs/udf/udf_i.h	1
8 files changed, 226 insertions, 167 deletions
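The largest ext3 hunk below rewrites the start/len handling in ext3_trim_fs(): the byte-based fstrim_range is turned into an inclusive block range, rejected only if it starts beyond the filesystem, clamped to the last block, and bumped up to the first data block. The stand-alone sketch below mirrors that arithmetic; the geometry constants and the helper are made up for the example (the real code reads them from the superblock).

/* Rough model of the new range handling in ext3_trim_fs(). */
#include <stdint.h>
#include <stdio.h>

#define BLOCKSIZE_BITS   12      /* 4 KiB blocks, for the example */
#define FIRST_DATA_BLOCK 1
#define BLOCKS_COUNT     1024

/* Fills *first/*last and returns 0, or -1 if nothing can be trimmed. */
static int trim_range_to_blocks(uint64_t byte_start, uint64_t byte_len,
				uint64_t *first, uint64_t *last)
{
	uint64_t start = byte_start >> BLOCKSIZE_BITS;
	uint64_t end = start + (byte_len >> BLOCKSIZE_BITS) - 1;

	if (start >= BLOCKS_COUNT)
		return -1;               /* starts past the filesystem */
	if (end >= BLOCKS_COUNT)
		end = BLOCKS_COUNT - 1;  /* clamp to the last block */
	if (end <= FIRST_DATA_BLOCK)
		return -1;               /* whole range in the reserved area
					    (the patch treats this as a
					    successful no-op instead) */
	if (start < FIRST_DATA_BLOCK)
		start = FIRST_DATA_BLOCK;

	*first = start;
	*last = end;
	return 0;
}

int main(void)
{
	uint64_t first, last;

	if (trim_range_to_blocks(0, 64ULL << 20, &first, &last) == 0)
		printf("trim blocks %llu..%llu\n",
		       (unsigned long long)first, (unsigned long long)last);
	return 0;
}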
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index a203892..1e036b7 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1743,8 +1743,11 @@ allocated:
 	*errp = 0;
 	brelse(bitmap_bh);
-	dquot_free_block(inode, *count-num);
-	*count = num;
+
+	if (num < *count) {
+		dquot_free_block(inode, *count-num);
+		*count = num;
+	}
 	trace_ext3_allocate_blocks(inode, goal, num,
 				   (unsigned long long)ret_block);
@@ -1970,7 +1973,7 @@ static ext3_grpblk_t ext3_trim_all_free(struct super_block *sb,
 	sbi = EXT3_SB(sb);
 
 	/* Walk through the whole group */
-	while (start < max) {
+	while (start <= max) {
 		start = bitmap_search_next_usable_block(start, bitmap_bh, max);
 		if (start < 0)
 			break;
@@ -1980,7 +1983,7 @@ static ext3_grpblk_t ext3_trim_all_free(struct super_block *sb,
 		 * Allocate contiguous free extents by setting bits in the
 		 * block bitmap
 		 */
-		while (next < max
+		while (next <= max
 			&& claim_block(sb_bgl_lock(sbi, group),
 				next, bitmap_bh)) {
 			next++;
@@ -2091,73 +2094,74 @@ err_out:
  */
 int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
 {
-	ext3_grpblk_t last_block, first_block, free_blocks;
-	unsigned long first_group, last_group;
-	unsigned long group, ngroups;
+	ext3_grpblk_t last_block, first_block;
+	unsigned long group, first_group, last_group;
 	struct ext3_group_desc *gdp;
 	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-	uint64_t start, len, minlen, trimmed;
+	uint64_t start, minlen, end, trimmed = 0;
+	ext3_fsblk_t first_data_blk =
+			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
 	ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
 	int ret = 0;
 
-	start = (range->start >> sb->s_blocksize_bits) +
-		le32_to_cpu(es->s_first_data_block);
-	len = range->len >> sb->s_blocksize_bits;
+	start = range->start >> sb->s_blocksize_bits;
+	end = start + (range->len >> sb->s_blocksize_bits) - 1;
 	minlen = range->minlen >> sb->s_blocksize_bits;
-	trimmed = 0;
 
-	if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)))
+	if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) ||
+	    unlikely(start >= max_blks))
 		return -EINVAL;
-	if (start >= max_blks)
-		return -EINVAL;
-	if (start + len > max_blks)
-		len = max_blks - start;
+	if (end >= max_blks)
+		end = max_blks - 1;
+	if (end <= first_data_blk)
+		goto out;
+	if (start < first_data_blk)
+		start = first_data_blk;
 
-	ngroups = EXT3_SB(sb)->s_groups_count;
 	smp_rmb();
 
 	/* Determine first and last group to examine based on start and len */
 	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
				     &first_group, &first_block);
-	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) (start + len),
+	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) end,
				     &last_group, &last_block);
-	last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
-	last_block = EXT3_BLOCKS_PER_GROUP(sb);
 
-	if (first_group > last_group)
-		return -EINVAL;
+	/* end now represents the last block to discard in this group */
+	end = EXT3_BLOCKS_PER_GROUP(sb) - 1;
 
 	for (group = first_group; group <= last_group; group++) {
 		gdp = ext3_get_group_desc(sb, group, NULL);
 		if (!gdp)
 			break;
 
-		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-		if (free_blocks < minlen)
-			continue;
-
 		/*
 		 * For all the groups except the last one, last block will
-		 * always be EXT3_BLOCKS_PER_GROUP(sb), so we only need to
-		 * change it for the last group in which case first_block +
-		 * len < EXT3_BLOCKS_PER_GROUP(sb).
+		 * always be EXT3_BLOCKS_PER_GROUP(sb)-1, so we only need to
+		 * change it for the last group, note that last_block is
+		 * already computed earlier by ext3_get_group_no_and_offset()
 		 */
-		if (first_block + len < EXT3_BLOCKS_PER_GROUP(sb))
-			last_block = first_block + len;
-		len -= last_block - first_block;
+		if (group == last_group)
+			end = last_block;
 
-		ret = ext3_trim_all_free(sb, group, first_block,
-					last_block, minlen);
-		if (ret < 0)
-			break;
+		if (le16_to_cpu(gdp->bg_free_blocks_count) >= minlen) {
+			ret = ext3_trim_all_free(sb, group, first_block,
+						 end, minlen);
+			if (ret < 0)
+				break;
+			trimmed += ret;
+		}
 
-		trimmed += ret;
+		/*
+		 * For every group except the first one, we are sure
+		 * that the first block to discard will be block #0.
+		 */
 		first_block = 0;
 	}
 
-	if (ret >= 0)
+	if (ret > 0)
 		ret = 0;
-	range->len = trimmed * sb->s_blocksize;
 
+out:
+	range->len = trimmed * sb->s_blocksize;
 	return ret;
 }
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 2d0afec..6d34186 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -756,6 +756,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
 	struct ext3_block_alloc_info *block_i;
 	ext3_fsblk_t current_block;
 	struct ext3_inode_info *ei = EXT3_I(inode);
+	struct timespec now;
 
 	block_i = ei->i_block_alloc_info;
 	/*
@@ -795,9 +796,11 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
 	}
 
 	/* We are done with atomic stuff, now do the rest of housekeeping */
-
-	inode->i_ctime = CURRENT_TIME_SEC;
-	ext3_mark_inode_dirty(handle, inode);
+	now = CURRENT_TIME_SEC;
+	if (!timespec_equal(&inode->i_ctime, &now) || !where->bh) {
+		inode->i_ctime = now;
+		ext3_mark_inode_dirty(handle, inode);
+	}
 
 	/* ext3_mark_inode_dirty already updated i_sync_tid */
 	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 8b4f12b..d69a1d1 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1110,6 +1110,13 @@ static void dquot_decr_space(struct dquot *dquot, qsize_t number)
 		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
 }
 
+struct dquot_warn {
+	struct super_block *w_sb;
+	qid_t w_dq_id;
+	short w_dq_type;
+	short w_type;
+};
+
 static int warning_issued(struct dquot *dquot, const int warntype)
 {
 	int flag = (warntype == QUOTA_NL_BHARDWARN ||
@@ -1125,41 +1132,42 @@ static int warning_issued(struct dquot *dquot, const int warntype)
 #ifdef CONFIG_PRINT_QUOTA_WARNING
 static int flag_print_warnings = 1;
 
-static int need_print_warning(struct dquot *dquot)
+static int need_print_warning(struct dquot_warn *warn)
 {
 	if (!flag_print_warnings)
 		return 0;
 
-	switch (dquot->dq_type) {
+	switch (warn->w_dq_type) {
 		case USRQUOTA:
-			return current_fsuid() == dquot->dq_id;
+			return current_fsuid() == warn->w_dq_id;
 		case GRPQUOTA:
-			return in_group_p(dquot->dq_id);
+			return in_group_p(warn->w_dq_id);
 	}
 	return 0;
 }
 
 /* Print warning to user which exceeded quota */
-static void print_warning(struct dquot *dquot, const int warntype)
+static void print_warning(struct dquot_warn *warn)
 {
 	char *msg = NULL;
 	struct tty_struct *tty;
+	int warntype = warn->w_type;
 
 	if (warntype == QUOTA_NL_IHARDBELOW ||
 	    warntype == QUOTA_NL_ISOFTBELOW ||
 	    warntype == QUOTA_NL_BHARDBELOW ||
-	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
+	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
 		return;
 
 	tty = get_current_tty();
 	if (!tty)
 		return;
-	tty_write_message(tty, dquot->dq_sb->s_id);
+	tty_write_message(tty, warn->w_sb->s_id);
 	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
 		tty_write_message(tty, ": warning, ");
 	else
 		tty_write_message(tty, ": write failed, ");
-	tty_write_message(tty, quotatypes[dquot->dq_type]);
+	tty_write_message(tty, quotatypes[warn->w_dq_type]);
 	switch (warntype) {
 		case QUOTA_NL_IHARDWARN:
 			msg = " file limit reached.\r\n";
@@ -1185,26 +1193,34 @@ static void print_warning(struct dquot *dquot, const int warntype)
 }
 #endif
 
+static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
+			    int warntype)
+{
+	if (warning_issued(dquot, warntype))
+		return;
+	warn->w_type = warntype;
+	warn->w_sb = dquot->dq_sb;
+	warn->w_dq_id = dquot->dq_id;
+	warn->w_dq_type = dquot->dq_type;
+}
+
 /*
  * Write warnings to the console and send warning messages over netlink.
  *
- * Note that this function can sleep.
+ * Note that this function can call into tty and networking code.
  */
-static void flush_warnings(struct dquot *const *dquots, char *warntype)
+static void flush_warnings(struct dquot_warn *warn)
 {
-	struct dquot *dq;
 	int i;
 
 	for (i = 0; i < MAXQUOTAS; i++) {
-		dq = dquots[i];
-		if (dq && warntype[i] != QUOTA_NL_NOWARN &&
-		    !warning_issued(dq, warntype[i])) {
+		if (warn[i].w_type == QUOTA_NL_NOWARN)
+			continue;
 #ifdef CONFIG_PRINT_QUOTA_WARNING
-			print_warning(dq, warntype[i]);
+		print_warning(&warn[i]);
 #endif
-			quota_send_warning(dq->dq_type, dq->dq_id,
-					   dq->dq_sb->s_dev, warntype[i]);
-		}
+		quota_send_warning(warn[i].w_dq_type, warn[i].w_dq_id,
+				   warn[i].w_sb->s_dev, warn[i].w_type);
 	}
 }
 
@@ -1218,11 +1234,11 @@ static int ignore_hardlimit(struct dquot *dquot)
 }
 
 /* needs dq_data_lock */
-static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
+static int check_idq(struct dquot *dquot, qsize_t inodes,
+		     struct dquot_warn *warn)
 {
 	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
 
-	*warntype = QUOTA_NL_NOWARN;
 	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
 	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
 		return 0;
@@ -1230,7 +1246,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
 	if (dquot->dq_dqb.dqb_ihardlimit &&
 	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
 	    !ignore_hardlimit(dquot)) {
-		*warntype = QUOTA_NL_IHARDWARN;
+		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
 		return -EDQUOT;
 	}
 
@@ -1239,14 +1255,14 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
 	    dquot->dq_dqb.dqb_itime &&
 	    get_seconds() >= dquot->dq_dqb.dqb_itime &&
 	    !ignore_hardlimit(dquot)) {
-		*warntype = QUOTA_NL_ISOFTLONGWARN;
+		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
 		return -EDQUOT;
 	}
 
 	if (dquot->dq_dqb.dqb_isoftlimit &&
 	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
 	    dquot->dq_dqb.dqb_itime == 0) {
-		*warntype = QUOTA_NL_ISOFTWARN;
+		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
 		dquot->dq_dqb.dqb_itime = get_seconds() +
 		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
 	}
@@ -1255,12 +1271,12 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
 }
 
 /* needs dq_data_lock */
-static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
+static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
+		     struct dquot_warn *warn)
 {
 	qsize_t tspace;
 	struct super_block *sb = dquot->dq_sb;
 
-	*warntype = QUOTA_NL_NOWARN;
 	if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
 	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
 		return 0;
@@ -1272,7 +1288,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
 	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
 	    !ignore_hardlimit(dquot)) {
 		if (!prealloc)
-			*warntype = QUOTA_NL_BHARDWARN;
+			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
 		return -EDQUOT;
 	}
 
@@ -1282,7 +1298,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
 	    get_seconds() >= dquot->dq_dqb.dqb_btime &&
 	    !ignore_hardlimit(dquot)) {
 		if (!prealloc)
-			*warntype = QUOTA_NL_BSOFTLONGWARN;
+			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
 		return -EDQUOT;
 	}
 
@@ -1290,7 +1306,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
 	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
 	    dquot->dq_dqb.dqb_btime == 0) {
 		if (!prealloc) {
-			*warntype = QUOTA_NL_BSOFTWARN;
+			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
 			dquot->dq_dqb.dqb_btime = get_seconds() +
 			    sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
 		}
@@ -1543,10 +1559,9 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 {
 	int cnt, ret = 0;
-	char warntype[MAXQUOTAS];
-	int warn = flags & DQUOT_SPACE_WARN;
+	struct dquot_warn warn[MAXQUOTAS];
+	struct dquot **dquots = inode->i_dquot;
 	int reserve = flags & DQUOT_SPACE_RESERVE;
-	int nofail = flags & DQUOT_SPACE_NOFAIL;
 
 	/*
 	 * First test before acquiring mutex - solves deadlocks when we
@@ -1559,36 +1574,36 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		warntype[cnt] = QUOTA_NL_NOWARN;
+		warn[cnt].w_type = QUOTA_NL_NOWARN;
 
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (!inode->i_dquot[cnt])
+		if (!dquots[cnt])
 			continue;
-		ret = check_bdq(inode->i_dquot[cnt], number, !warn,
-				warntype+cnt);
-		if (ret && !nofail) {
+		ret = check_bdq(dquots[cnt], number,
+				!(flags & DQUOT_SPACE_WARN), &warn[cnt]);
+		if (ret && !(flags & DQUOT_SPACE_NOFAIL)) {
 			spin_unlock(&dq_data_lock);
 			goto out_flush_warn;
 		}
 	}
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (!inode->i_dquot[cnt])
+		if (!dquots[cnt])
 			continue;
 		if (reserve)
-			dquot_resv_space(inode->i_dquot[cnt], number);
+			dquot_resv_space(dquots[cnt], number);
 		else
-			dquot_incr_space(inode->i_dquot[cnt], number);
+			dquot_incr_space(dquots[cnt], number);
 	}
 	inode_incr_space(inode, number, reserve);
 	spin_unlock(&dq_data_lock);
 
 	if (reserve)
 		goto out_flush_warn;
-	mark_all_dquot_dirty(inode->i_dquot);
+	mark_all_dquot_dirty(dquots);
out_flush_warn:
-	flush_warnings(inode->i_dquot, warntype);
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	flush_warnings(warn);
out:
 	return ret;
 }
@@ -1600,36 +1615,37 @@ EXPORT_SYMBOL(__dquot_alloc_space);
 int dquot_alloc_inode(const struct inode *inode)
 {
 	int cnt, ret = 0;
-	char warntype[MAXQUOTAS];
+	struct dquot_warn warn[MAXQUOTAS];
+	struct dquot * const *dquots = inode->i_dquot;
 
 	/* First test before acquiring mutex - solves deadlocks when we
 	 * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode))
 		return 0;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		warntype[cnt] = QUOTA_NL_NOWARN;
+		warn[cnt].w_type = QUOTA_NL_NOWARN;
 	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (!inode->i_dquot[cnt])
+		if (!dquots[cnt])
 			continue;
-		ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt);
+		ret = check_idq(dquots[cnt], 1, &warn[cnt]);
 		if (ret)
 			goto warn_put_all;
 	}
 
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (!inode->i_dquot[cnt])
+		if (!dquots[cnt])
 			continue;
-		dquot_incr_inodes(inode->i_dquot[cnt], 1);
+		dquot_incr_inodes(dquots[cnt], 1);
 	}
 
warn_put_all:
 	spin_unlock(&dq_data_lock);
 	if (ret == 0)
-		mark_all_dquot_dirty(inode->i_dquot);
-	flush_warnings(inode->i_dquot, warntype);
+		mark_all_dquot_dirty(dquots);
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	flush_warnings(warn);
 	return ret;
 }
 EXPORT_SYMBOL(dquot_alloc_inode);
@@ -1669,7 +1685,8 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
 void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
 {
 	unsigned int cnt;
-	char warntype[MAXQUOTAS];
+	struct dquot_warn warn[MAXQUOTAS];
+	struct dquot **dquots = inode->i_dquot;
 	int reserve = flags & DQUOT_SPACE_RESERVE;
 
 	/* First test before acquiring mutex - solves deadlocks when we
@@ -1682,23 +1699,28 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
 	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (!inode->i_dquot[cnt])
+		int wtype;
+
+		warn[cnt].w_type = QUOTA_NL_NOWARN;
+		if (!dquots[cnt])
 			continue;
-		warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
+		wtype = info_bdq_free(dquots[cnt], number);
+		if (wtype != QUOTA_NL_NOWARN)
+			prepare_warning(&warn[cnt], dquots[cnt], wtype);
 		if (reserve)
-			dquot_free_reserved_space(inode->i_dquot[cnt], number);
+			dquot_free_reserved_space(dquots[cnt], number);
 		else
-			dquot_decr_space(inode->i_dquot[cnt], number);
+			dquot_decr_space(dquots[cnt], number);
 	}
 	inode_decr_space(inode, number, reserve);
 	spin_unlock(&dq_data_lock);
 
 	if (reserve)
 		goto out_unlock;
-	mark_all_dquot_dirty(inode->i_dquot);
+	mark_all_dquot_dirty(dquots);
out_unlock:
-	flush_warnings(inode->i_dquot, warntype);
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	flush_warnings(warn);
 }
 EXPORT_SYMBOL(__dquot_free_space);
@@ -1708,7 +1730,8 @@ EXPORT_SYMBOL(__dquot_free_space);
 void dquot_free_inode(const struct inode *inode)
 {
 	unsigned int cnt;
-	char warntype[MAXQUOTAS];
+	struct dquot_warn warn[MAXQUOTAS];
+	struct dquot * const *dquots = inode->i_dquot;
 
 	/* First test before acquiring mutex - solves deadlocks when we
 	 * re-enter the quota code and are already holding the mutex */
@@ -1718,15 +1741,20 @@ void dquot_free_inode(const struct inode *inode)
 	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (!inode->i_dquot[cnt])
+		int wtype;
+
+		warn[cnt].w_type = QUOTA_NL_NOWARN;
+		if (!dquots[cnt])
 			continue;
-		warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
-		dquot_decr_inodes(inode->i_dquot[cnt], 1);
+		wtype = info_idq_free(dquots[cnt], 1);
+		if (wtype != QUOTA_NL_NOWARN)
+			prepare_warning(&warn[cnt], dquots[cnt], wtype);
+		dquot_decr_inodes(dquots[cnt], 1);
 	}
 	spin_unlock(&dq_data_lock);
-	mark_all_dquot_dirty(inode->i_dquot);
-	flush_warnings(inode->i_dquot, warntype);
+	mark_all_dquot_dirty(dquots);
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	flush_warnings(warn);
 }
 EXPORT_SYMBOL(dquot_free_inode);
@@ -1747,16 +1775,20 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	struct dquot *transfer_from[MAXQUOTAS] = {};
 	int cnt, ret = 0;
 	char is_valid[MAXQUOTAS] = {};
-	char warntype_to[MAXQUOTAS];
-	char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
+	struct dquot_warn warn_to[MAXQUOTAS];
+	struct dquot_warn warn_from_inodes[MAXQUOTAS];
+	struct dquot_warn warn_from_space[MAXQUOTAS];
 
 	/* First test before acquiring mutex - solves deadlocks when we
 	 * re-enter the quota code and are already holding the mutex */
 	if (IS_NOQUOTA(inode))
 		return 0;
 	/* Initialize the arrays */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		warntype_to[cnt] = QUOTA_NL_NOWARN;
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
+		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
+		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
+	}
 	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
 		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1778,10 +1810,10 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 			continue;
 		is_valid[cnt] = 1;
 		transfer_from[cnt] = inode->i_dquot[cnt];
-		ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
+		ret = check_idq(transfer_to[cnt], 1, &warn_to[cnt]);
 		if (ret)
 			goto over_quota;
-		ret = check_bdq(transfer_to[cnt], space, 0, warntype_to + cnt);
+		ret = check_bdq(transfer_to[cnt], space, 0, &warn_to[cnt]);
 		if (ret)
 			goto over_quota;
 	}
@@ -1794,10 +1826,15 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 			continue;
 		/* Due to IO error we might not have transfer_from[] structure */
 		if (transfer_from[cnt]) {
-			warntype_from_inodes[cnt] =
-				info_idq_free(transfer_from[cnt], 1);
-			warntype_from_space[cnt] =
-				info_bdq_free(transfer_from[cnt], space);
+			int wtype;
+			wtype = info_idq_free(transfer_from[cnt], 1);
+			if (wtype != QUOTA_NL_NOWARN)
+				prepare_warning(&warn_from_inodes[cnt],
+						transfer_from[cnt], wtype);
+			wtype = info_bdq_free(transfer_from[cnt], space);
+			if (wtype != QUOTA_NL_NOWARN)
+				prepare_warning(&warn_from_space[cnt],
+						transfer_from[cnt], wtype);
 			dquot_decr_inodes(transfer_from[cnt], 1);
 			dquot_decr_space(transfer_from[cnt], cur_space);
 			dquot_free_reserved_space(transfer_from[cnt],
@@ -1815,9 +1852,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	mark_all_dquot_dirty(transfer_from);
 	mark_all_dquot_dirty(transfer_to);
-	flush_warnings(transfer_to, warntype_to);
-	flush_warnings(transfer_from, warntype_from_inodes);
-	flush_warnings(transfer_from, warntype_from_space);
+	flush_warnings(warn_to);
+	flush_warnings(warn_from_inodes);
+	flush_warnings(warn_from_space);
 	/* Pass back references to put */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		if (is_valid[cnt])
@@ -1826,7 +1863,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
over_quota:
 	spin_unlock(&dq_data_lock);
 	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-	flush_warnings(transfer_to, warntype_to);
+	flush_warnings(warn_to);
 	return ret;
 }
 EXPORT_SYMBOL(__dquot_transfer);
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 987585b..1ba2baa 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -105,7 +105,6 @@ static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
 }
 
 static void udf_bitmap_free_blocks(struct super_block *sb,
-				   struct inode *inode,
 				   struct udf_bitmap *bitmap,
 				   struct kernel_lb_addr *bloc,
 				   uint32_t offset,
@@ -172,7 +171,6 @@ error_return:
 }
 
 static int udf_bitmap_prealloc_blocks(struct super_block *sb,
-				      struct inode *inode,
 				      struct udf_bitmap *bitmap,
 				      uint16_t partition, uint32_t first_block,
 				      uint32_t block_count)
@@ -223,7 +221,6 @@ out:
 }
 
 static int udf_bitmap_new_block(struct super_block *sb,
-				struct inode *inode,
 				struct udf_bitmap *bitmap, uint16_t partition,
 				uint32_t goal, int *err)
 {
@@ -349,7 +346,6 @@ error_return:
 }
 
 static void udf_table_free_blocks(struct super_block *sb,
-				  struct inode *inode,
 				  struct inode *table,
 				  struct kernel_lb_addr *bloc,
 				  uint32_t offset,
@@ -581,7 +577,6 @@ error_return:
 }
 
 static int udf_table_prealloc_blocks(struct super_block *sb,
-				     struct inode *inode,
 				     struct inode *table, uint16_t partition,
 				     uint32_t first_block, uint32_t block_count)
 {
@@ -643,7 +638,6 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
 }
 
 static int udf_table_new_block(struct super_block *sb,
-			       struct inode *inode,
 			       struct inode *table, uint16_t partition,
 			       uint32_t goal, int *err)
 {
@@ -743,18 +737,23 @@ void udf_free_blocks(struct super_block *sb, struct inode *inode,
 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
 
 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
-		udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
+		udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
				       bloc, offset, count);
 	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
-		udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
+		udf_table_free_blocks(sb, map->s_uspace.s_table,
				      bloc, offset, count);
 	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
-		udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
+		udf_bitmap_free_blocks(sb, map->s_fspace.s_bitmap,
				       bloc, offset, count);
 	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
-		udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
+		udf_table_free_blocks(sb, map->s_fspace.s_table,
				      bloc, offset, count);
 	}
+
+	if (inode) {
+		inode_sub_bytes(inode,
+				((sector_t)count) << sb->s_blocksize_bits);
+	}
 }
 
 inline int udf_prealloc_blocks(struct super_block *sb,
@@ -763,29 +762,34 @@ inline int udf_prealloc_blocks(struct super_block *sb,
 			       uint32_t block_count)
 {
 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
+	sector_t allocated;
 
 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
-		return udf_bitmap_prealloc_blocks(sb, inode,
-						  map->s_uspace.s_bitmap,
-						  partition, first_block,
-						  block_count);
+		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_uspace.s_bitmap,
						       partition, first_block,
						       block_count);
 	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
-		return udf_table_prealloc_blocks(sb, inode,
-						 map->s_uspace.s_table,
-						 partition, first_block,
-						 block_count);
+		allocated = udf_table_prealloc_blocks(sb,
						      map->s_uspace.s_table,
						      partition, first_block,
						      block_count);
 	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
-		return udf_bitmap_prealloc_blocks(sb, inode,
-						  map->s_fspace.s_bitmap,
-						  partition, first_block,
-						  block_count);
+		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_fspace.s_bitmap,
						       partition, first_block,
						       block_count);
 	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
-		return udf_table_prealloc_blocks(sb, inode,
-						 map->s_fspace.s_table,
-						 partition, first_block,
-						 block_count);
+		allocated = udf_table_prealloc_blocks(sb,
						      map->s_fspace.s_table,
						      partition, first_block,
						      block_count);
 	else
 		return 0;
+
+	if (inode && allocated > 0)
+		inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
+	return allocated;
 }
 
 inline int udf_new_block(struct super_block *sb,
@@ -793,25 +797,29 @@ inline int udf_new_block(struct super_block *sb,
 			 uint16_t partition, uint32_t goal, int *err)
 {
 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
+	int block;
 
 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
-		return udf_bitmap_new_block(sb, inode,
-					    map->s_uspace.s_bitmap,
-					    partition, goal, err);
+		block = udf_bitmap_new_block(sb,
					     map->s_uspace.s_bitmap,
					     partition, goal, err);
 	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
-		return udf_table_new_block(sb, inode,
-					   map->s_uspace.s_table,
-					   partition, goal, err);
-	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
-		return udf_bitmap_new_block(sb, inode,
-					    map->s_fspace.s_bitmap,
+		block = udf_table_new_block(sb,
					    map->s_uspace.s_table,
					    partition, goal, err);
+	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
+		block = udf_bitmap_new_block(sb,
					     map->s_fspace.s_bitmap,
					     partition, goal, err);
 	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
-		return udf_table_new_block(sb, inode,
-					   map->s_fspace.s_table,
-					   partition, goal, err);
+		block = udf_table_new_block(sb,
					    map->s_fspace.s_table,
					    partition, goal, err);
 	else {
 		*err = -EIO;
 		return 0;
 	}
+
+	if (inode && block)
+		inode_add_bytes(inode, sb->s_blocksize);
+	return block;
 }
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 05ab481..7e5aae4 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -116,6 +116,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
 	iinfo->i_lenEAttr = 0;
 	iinfo->i_lenAlloc = 0;
 	iinfo->i_use = 0;
+	iinfo->i_checkpoint = 1;
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
 	else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7699df7..7d75280 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1358,6 +1358,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
 		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
 		iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
+		iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
 		offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr;
 	} else {
 		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
@@ -1379,6 +1380,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
 		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
 		iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
+		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
 		offset = sizeof(struct extendedFileEntry) +
 							iinfo->i_lenEAttr;
 	}
@@ -1495,6 +1497,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
 	struct buffer_head *bh = NULL;
 	struct fileEntry *fe;
 	struct extendedFileEntry *efe;
+	uint64_t lb_recorded;
 	uint32_t udfperms;
 	uint16_t icbflags;
 	uint16_t crclen;
@@ -1589,13 +1592,18 @@ static int udf_update_inode(struct inode *inode, int do_sync)
 		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
 	}
 
+	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+		lb_recorded = 0; /* No extents => no blocks! */
+	else
+		lb_recorded =
+			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
+			(blocksize_bits - 9);
+
 	if (iinfo->i_efe == 0) {
 		memcpy(bh->b_data + sizeof(struct fileEntry),
 		       iinfo->i_ext.i_data,
 		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
-		fe->logicalBlocksRecorded = cpu_to_le64(
-			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
-			(blocksize_bits - 9));
+		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
 
 		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
 		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
@@ -1607,6 +1615,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
 		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
 		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
 		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
+		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
 		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
 		crclen = sizeof(struct fileEntry);
 	} else {
@@ -1615,9 +1624,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
 		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
 		       iinfo->i_ext.i_data,
 		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
 		efe->objectSize = cpu_to_le64(inode->i_size);
-		efe->logicalBlocksRecorded = cpu_to_le64(
-			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
-			(blocksize_bits - 9));
+		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
 
 		if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
 		    (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
@@ -1646,6 +1653,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
 		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
 		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
 		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
+		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
 		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
 		crclen = sizeof(struct extendedFileEntry);
 	}
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 85067b4..ac8a348 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -950,11 +950,8 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
 	else
 		bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
 
-	if (bitmap == NULL) {
-		udf_err(sb, "Unable to allocate space for bitmap and %d buffer_head pointers\n",
-			nr_groups);
+	if (bitmap == NULL)
 		return NULL;
-	}
 
 	bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
 	bitmap->s_nr_groups = nr_groups;
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index d1bd31e..bb8309d 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -23,6 +23,7 @@ struct udf_inode_info {
 	__u64			i_lenExtents;
 	__u32			i_next_alloc_block;
 	__u32			i_next_alloc_goal;
+	__u32			i_checkpoint;
 	unsigned		i_alloc_type : 3;
 	unsigned		i_efe : 1;	/* extendedFileEntry */
 	unsigned		i_use : 1;	/* unallocSpaceEntry */
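One way to read the udf/balloc.c part of this diff is as a centralization of i_blocks accounting: the low-level bitmap and table allocators lose their inode argument, and the udf_new_block()/udf_free_blocks()/udf_prealloc_blocks() wrappers call inode_add_bytes()/inode_sub_bytes() exactly once, for whichever backend handled the request. The toy model below mirrors that shape; all types and helpers in it are invented for the illustration and do not correspond to real kernel APIs.

/* Toy model of accounting done once in the allocation wrappers. */
#include <stdint.h>
#include <stdio.h>

#define BLOCKSIZE 2048           /* a typical UDF block size */

struct fake_inode {
	uint64_t bytes;          /* stands in for the inode's block accounting */
};

/* Low-level allocator: knows nothing about inodes. */
static int backend_new_block(uint32_t goal)
{
	return (int)goal + 1;    /* pretend the next block was free */
}

static void backend_free_blocks(uint32_t first, uint32_t count)
{
	(void)first;
	(void)count;             /* nothing to model here */
}

/* The wrappers charge or release bytes in one place. */
static int new_block_model(struct fake_inode *inode, uint32_t goal)
{
	int block = backend_new_block(goal);

	if (inode && block)
		inode->bytes += BLOCKSIZE;
	return block;
}

static void free_blocks_model(struct fake_inode *inode,
			      uint32_t first, uint32_t count)
{
	backend_free_blocks(first, count);
	if (inode)
		inode->bytes -= (uint64_t)count * BLOCKSIZE;
}

int main(void)
{
	struct fake_inode ino = { 0 };
	int blk = new_block_model(&ino, 41);

	free_blocks_model(&ino, (uint32_t)blk, 1);
	printf("bytes accounted after alloc+free: %llu\n",
	       (unsigned long long)ino.bytes);
	return 0;
}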