 fs/jfs/jfs_dmap.c     | 46
 fs/jfs/jfs_dtree.c    | 13
 fs/jfs/jfs_logmgr.c   | 39
 fs/jfs/jfs_logmgr.h   |  2
 fs/jfs/jfs_metapage.c | 11
 fs/jfs/jfs_txnmgr.c   | 10
 fs/jfs/super.c        |  2
 7 files changed, 68 insertions(+), 55 deletions(-)
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 0732f20..c739626 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -75,7 +75,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
                         int nblocks);
 static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
 static void dbBackSplit(dmtree_t * tp, int leafno);
-static void dbJoin(dmtree_t * tp, int leafno, int newval);
+static int dbJoin(dmtree_t * tp, int leafno, int newval);
 static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
 static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
                     int level);
@@ -98,8 +98,8 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
 static int dbFindBits(u32 word, int l2nb);
 static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
 static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
-static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
-                       int nblocks);
+static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+                      int nblocks);
 static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
                       int nblocks);
 static int dbMaxBud(u8 * cp);
@@ -378,6 +378,7 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
 
                 /* free the blocks. */
                 if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
+                        jfs_error(ip->i_sb, "dbFree: error in block map\n");
                         release_metapage(mp);
                         IREAD_UNLOCK(ipbmap);
                         return (rc);
@@ -2020,7 +2021,7 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
                       int nblocks)
 {
         s8 oldroot;
-        int rc, word;
+        int rc = 0, word;
 
         /* save the current value of the root (i.e. maximum free string)
          * of the dmap tree.
@@ -2028,11 +2029,11 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
         oldroot = dp->tree.stree[ROOT];
 
         /* free the specified (blocks) bits */
-        dbFreeBits(bmp, dp, blkno, nblocks);
+        rc = dbFreeBits(bmp, dp, blkno, nblocks);
 
-        /* if the root has not changed, done. */
-        if (dp->tree.stree[ROOT] == oldroot)
-                return (0);
+        /* if error or the root has not changed, done. */
+        if (rc || (dp->tree.stree[ROOT] == oldroot))
+                return (rc);
 
         /* root changed. bubble the change up to the dmap control pages.
          * if the adjustment of the upper level control pages fails,
@@ -2221,15 +2222,16 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
  *      blkno   - starting block number of the bits to be freed.
  *      nblocks - number of bits to be freed.
  *
- * RETURN VALUES: none
+ * RETURN VALUES: 0 for success
  *
  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
  */
-static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
                        int nblocks)
 {
         int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
         dmtree_t *tp = (dmtree_t *) & dp->tree;
+        int rc = 0;
         int size;
 
         /* determine the bit number and word within the dmap of the
@@ -2278,8 +2280,10 @@ static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
 
                         /* update the leaf for this dmap word.
                          */
-                        dbJoin(tp, word,
-                               dbMaxBud((u8 *) & dp->wmap[word]));
+                        rc = dbJoin(tp, word,
+                                    dbMaxBud((u8 *) & dp->wmap[word]));
+                        if (rc)
+                                return rc;
 
                         word += 1;
                 } else {
@@ -2310,7 +2314,9 @@ static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
 
                         /* update the leaf.
                          */
-                        dbJoin(tp, word, size);
+                        rc = dbJoin(tp, word, size);
+                        if (rc)
+                                return rc;
 
                         /* get the number of dmap words handled.
                          */
@@ -2357,6 +2363,8 @@ static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
         }
 
         BMAP_UNLOCK(bmp);
+
+        return 0;
 }
@@ -2464,7 +2472,9 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
                 }
                 dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
         } else {
-                dbJoin((dmtree_t *) dcp, leafno, newval);
+                rc = dbJoin((dmtree_t *) dcp, leafno, newval);
+                if (rc)
+                        return rc;
         }
 
         /* check if the root of the current dmap control page changed due
@@ -2689,7 +2699,7 @@ static void dbBackSplit(dmtree_t * tp, int leafno)
  *
  * RETURN VALUES: none
  */
-static void dbJoin(dmtree_t * tp, int leafno, int newval)
+static int dbJoin(dmtree_t * tp, int leafno, int newval)
 {
         int budsz, buddy;
         s8 *leaf;
@@ -2729,7 +2739,9 @@ static void dbJoin(dmtree_t * tp, int leafno, int newval)
                         if (newval > leaf[buddy])
                                 break;
 
-                        assert(newval == leaf[buddy]);
+                        /* It shouldn't be less */
+                        if (newval < leaf[buddy])
+                                return -EIO;
 
                         /* check which (leafno or buddy) is the left buddy.
                          * the left buddy gets to claim the blocks resulting
@@ -2761,6 +2773,8 @@ static void dbJoin(dmtree_t * tp, int leafno, int newval)
 
         /* update the leaf value.
          */
         dbAdjTree(tp, leafno, newval);
+
+        return 0;
 }
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 73b5fc7..404f33e 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -381,9 +381,12 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
                  * It's time to move the inline table to an external
                  * page and begin to build the xtree
                  */
-                if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage) ||
-                    dbAlloc(ip, 0, sbi->nbperpage, &xaddr))
-                        goto clean_up;  /* No space */
+                if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
+                        goto clean_up;
+                if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
+                        DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+                        goto clean_up;
+                }
 
                 /*
                  * Save the table, we're going to overwrite it with the
@@ -397,13 +400,15 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
                 xtInitRoot(tid, ip);
 
                 /*
-                 * Allocate the first block & add it to the xtree
+                 * Add the first block to the xtree
                  */
                 if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
                         /* This really shouldn't fail */
                         jfs_warn("add_index: xtInsert failed!");
                         memcpy(&jfs_ip->i_dirtable, temp_table,
                                sizeof (temp_table));
+                        dbFree(ip, xaddr, sbi->nbperpage);
+                        DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
                         goto clean_up;
                 }
                 ip->i_size = PSIZE;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 79d0762..d27bac6 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -191,7 +191,7 @@ static int lbmIOWait(struct lbuf * bp, int flag);
 static bio_end_io_t lbmIODone;
 static void lbmStartIO(struct lbuf * bp);
 static void lmGCwrite(struct jfs_log * log, int cant_block);
-static int lmLogSync(struct jfs_log * log, int nosyncwait);
+static int lmLogSync(struct jfs_log * log, int hard_sync);
 
 
 
@@ -915,19 +915,17 @@ static void lmPostGC(struct lbuf * bp)
  *      if new sync address is available
  *      (normally the case if sync() is executed by back-ground
  *      process).
- *      if not, explicitly run jfs_blogsync() to initiate
- *      getting of new sync address.
  *      calculate new value of i_nextsync which determines when
  *      this code is called again.
  *
  * PARAMETERS:  log     - log structure
- *              nosyncwait - 1 if called asynchronously
+ *              hard_sync - 1 to force all metadata to be written
  *
  * RETURN:      0
  *
  * serialization: LOG_LOCK() held on entry/exit
  */
-static int lmLogSync(struct jfs_log * log, int nosyncwait)
+static int lmLogSync(struct jfs_log * log, int hard_sync)
 {
         int logsize;
         int written;    /* written since last syncpt */
@@ -941,11 +939,18 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
         unsigned long flags;
 
         /* push dirty metapages out to disk */
-        list_for_each_entry(sbi, &log->sb_list, log_list) {
-                filemap_flush(sbi->ipbmap->i_mapping);
-                filemap_flush(sbi->ipimap->i_mapping);
-                filemap_flush(sbi->direct_inode->i_mapping);
-        }
+        if (hard_sync)
+                list_for_each_entry(sbi, &log->sb_list, log_list) {
+                        filemap_fdatawrite(sbi->ipbmap->i_mapping);
+                        filemap_fdatawrite(sbi->ipimap->i_mapping);
+                        filemap_fdatawrite(sbi->direct_inode->i_mapping);
+                }
+        else
+                list_for_each_entry(sbi, &log->sb_list, log_list) {
+                        filemap_flush(sbi->ipbmap->i_mapping);
+                        filemap_flush(sbi->ipimap->i_mapping);
+                        filemap_flush(sbi->direct_inode->i_mapping);
+                }
 
         /*
          * forward syncpt
@@ -1021,16 +1026,13 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
         /* next syncpt trigger = written + more */
         log->nextsync = written + more;
 
-        /* return if lmLogSync() from outside of transaction, e.g., sync() */
-        if (nosyncwait)
-                return lsn;
-
         /* if number of bytes written from last sync point is more
          * than 1/4 of the log size, stop new transactions from
          * starting until all current transactions are completed
         * by setting syncbarrier flag.
          */
-        if (written > LOGSYNC_BARRIER(logsize) && logsize > 32 * LOGPSIZE) {
+        if (!test_bit(log_SYNCBARRIER, &log->flag) &&
+            (written > LOGSYNC_BARRIER(logsize)) && log->active) {
                 set_bit(log_SYNCBARRIER, &log->flag);
                 jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn,
                          log->syncpt);
@@ -1048,11 +1050,12 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
  *
  * FUNCTION:    write log SYNCPT record for specified log
  *
- * PARAMETERS:  log     - log structure
+ * PARAMETERS:  log     - log structure
+ *              hard_sync - set to 1 to force metadata to be written
  */
-void jfs_syncpt(struct jfs_log *log)
+void jfs_syncpt(struct jfs_log *log, int hard_sync)
 {
         LOG_LOCK(log);
-        lmLogSync(log, 1);
+        lmLogSync(log, hard_sync);
         LOG_UNLOCK(log);
 }
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index 747114c..e4978b5 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -510,6 +510,6 @@ extern int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize);
 extern int lmGroupCommit(struct jfs_log *, struct tblock *);
 extern int jfsIOWait(void *);
 extern void jfs_flush_journal(struct jfs_log * log, int wait);
-extern void jfs_syncpt(struct jfs_log *log);
+extern void jfs_syncpt(struct jfs_log *log, int hard_sync);
 
 #endif  /* _H_JFS_LOGMGR */
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 6c5485d..13d7e3f 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -561,7 +561,6 @@ static int metapage_releasepage(struct page *page, int gfp_mask)
                         dump_mem("page", page, sizeof(struct page));
                         dump_stack();
                 }
-                WARN_ON(mp->lsn);
                 if (mp->lsn)
                         remove_from_logsync(mp);
                 remove_metapage(page, mp);
@@ -641,7 +640,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
         } else {
                 page = read_cache_page(mapping, page_index,
                             (filler_t *)mapping->a_ops->readpage, NULL);
-                if (IS_ERR(page)) {
+                if (IS_ERR(page) || !PageUptodate(page)) {
                         jfs_err("read_cache_page failed!");
                         return NULL;
                 }
@@ -783,14 +782,6 @@ void release_metapage(struct metapage * mp)
         if (test_bit(META_discard, &mp->flag) && !mp->count) {
                 clear_page_dirty(page);
                 ClearPageUptodate(page);
-#ifdef _NOT_YET
-                if (page->mapping) {
-                        /* Remove from page cache and page cache reference */
-                        remove_from_page_cache(page);
-                        page_cache_release(page);
-                        metapage_releasepage(page, 0);
-                }
-#endif
         }
 #else
         /* Try to keep metapages from using up too much memory */
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 121c981..3555acf 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -552,6 +552,11 @@ void txEnd(tid_t tid)
                  * synchronize with logsync barrier
                  */
                 if (test_bit(log_SYNCBARRIER, &log->flag)) {
+                        TXN_UNLOCK();
+
+                        /* write dirty metadata & forward log syncpt */
+                        jfs_syncpt(log, 1);
+
                         jfs_info("log barrier off: 0x%x", log->lsn);
 
                         /* enable new transactions start */
@@ -560,11 +565,6 @@ void txEnd(tid_t tid)
                         /* wakeup all waitors for logsync barrier */
                         TXN_WAKEUP(&log->syncwait);
 
-                        TXN_UNLOCK();
-
-                        /* forward log syncpt */
-                        jfs_syncpt(log);
-
                         goto wakeup;
                 }
         }
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index ee32211..c2abdae 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -531,7 +531,7 @@ static int jfs_sync_fs(struct super_block *sb, int wait)
         /* log == NULL indicates read-only mount */
         if (log) {
                 jfs_flush_journal(log, wait);
-                jfs_syncpt(log);
+                jfs_syncpt(log, 0);
         }
 
         return 0;
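The jfs_dmap.c hunks above share one pattern: dbJoin() stops assert()ing on an inconsistent leaf value and instead returns -EIO, and every caller in the chain (dbFreeBits, dbFreeDmap, dbAdjCtl, dbFree) now checks and forwards that return code. The standalone C sketch below illustrates only that error-propagation pattern; it uses hypothetical names (join_leaf, free_bits, free_range) rather than the JFS functions and builds in user space, so treat it as an illustration of the idea, not kernel code.

    /*
     * Sketch of the pattern: the lowest-level helper reports an
     * inconsistency as -EIO instead of asserting, and each caller
     * checks the return code and passes it upward.
     */
    #include <errno.h>
    #include <stdio.h>

    /* lowest level: detect the inconsistency and return an error */
    static int join_leaf(int newval, int buddyval)
    {
            if (newval < buddyval)          /* "It shouldn't be less" */
                    return -EIO;
            /* ... perform the join ... */
            return 0;
    }

    /* middle level: forward any error from the helper immediately */
    static int free_bits(int newval, int buddyval)
    {
            int rc = join_leaf(newval, buddyval);
            if (rc)
                    return rc;
            /* ... update the remaining words ... */
            return 0;
    }

    /* top level: the caller decides how to surface the failure */
    static int free_range(int newval, int buddyval)
    {
            int rc = free_bits(newval, buddyval);
            if (rc)
                    fprintf(stderr, "free_range: error in block map (%d)\n", rc);
            return rc;
    }

    int main(void)
    {
            return free_range(2, 3) ? 1 : 0;        /* exercises the -EIO path */
    }

The benefit, visible in the dbFree hunk, is that a corrupted block map is reported through jfs_error() and an error return rather than bringing the kernel down through an assertion.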