author    Christoph Hellwig <hch@infradead.org>   2008-10-30 16:58:32 +1100
committer Lachlan McIlroy <lachlan@sgi.com>       2008-10-30 16:58:32 +1100
commit    4a26e66e7728112f0e1cd7eca3bcc430b3a221c9
tree      1944f9aa65476c963658b7b4679f7a64287adfb6 /fs/xfs
parent    fd6bcc5b63051392ba709a8fd33173b263669e0a
[XFS] add keys_inorder and recs_inorder btree methods
Add methods to check whether two keys/records are in the right order. This
replaces the xfs_btree_check_key and xfs_btree_check_rec methods. For the
callers in xfs_bmap.c, just open-code the bmbt-specific asserts.
SGI-PV: 985583
SGI-Modid: xfs-linux-melb:xfs-kern:32208a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Bill O'Donnell <billodo@sgi.com>
Signed-off-by: David Chinner <david@fromorbit.com>
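
As context for the diff below, here is a small standalone sketch of the pattern this patch introduces: the ordering checks hang off the per-btree ops vector as DEBUG-only function pointers, and the generic code asserts through them instead of switching on the btree type. The names used here (demo_key, demo_btree_ops, demo_keys_inorder and the hard-coded key values) are simplified stand-ins for illustration only, not the kernel's union xfs_btree_key or struct xfs_btree_ops:

/*
 * Standalone sketch of the keys_inorder pattern; simplified types,
 * not the kernel definitions.
 */
#include <assert.h>
#include <stdio.h>

#define DEBUG 1	/* the kernel gets this from its debug config; hard-wired so the sketch builds as-is */

struct demo_key {
	unsigned int	startblock;
};

struct demo_btree_ops {
#ifdef DEBUG
	/* return nonzero if k1 sorts strictly before k2 */
	int	(*keys_inorder)(const struct demo_key *k1,
				const struct demo_key *k2);
#endif
};

#ifdef DEBUG
static int
demo_keys_inorder(
	const struct demo_key	*k1,
	const struct demo_key	*k2)
{
	return k1->startblock < k2->startblock;
}
#endif

static const struct demo_btree_ops demo_ops = {
#ifdef DEBUG
	.keys_inorder	= demo_keys_inorder,
#endif
};

int main(void)
{
	struct demo_key	left  = { .startblock = 10 };
	struct demo_key	right = { .startblock = 20 };

#ifdef DEBUG
	/*
	 * Generic code no longer switches on the btree type the way
	 * xfs_btree_check_key() did; it asserts through the per-btree
	 * ops vector instead.
	 */
	assert(demo_ops.keys_inorder(&left, &right));
#endif
	printf("keys are in order\n");
	return 0;
}

The upshot of the design is that each btree type supplies its own ordering predicate, and because the new ops members and the ASSERT call sites are guarded by #ifdef DEBUG, the checks disappear entirely in non-debug builds.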
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_alloc_btree.c  |  44
-rw-r--r--  fs/xfs/xfs_bmap.c         |  11
-rw-r--r--  fs/xfs/xfs_bmap_btree.c   |  28
-rw-r--r--  fs/xfs/xfs_btree.c        | 150
-rw-r--r--  fs/xfs/xfs_btree.h        |  36
-rw-r--r--  fs/xfs/xfs_ialloc_btree.c |  27
6 files changed, 135 insertions(+), 161 deletions(-)
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 4d44f03..9e63f8c 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -311,6 +311,45 @@ xfs_allocbt_kill_root(
 	return 0;
 }
 
+#ifdef DEBUG
+STATIC int
+xfs_allocbt_keys_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*k1,
+	union xfs_btree_key	*k2)
+{
+	if (cur->bc_btnum == XFS_BTNUM_BNO) {
+		return be32_to_cpu(k1->alloc.ar_startblock) <
+			be32_to_cpu(k2->alloc.ar_startblock);
+	} else {
+		return be32_to_cpu(k1->alloc.ar_blockcount) <
+			be32_to_cpu(k2->alloc.ar_blockcount) ||
+			(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
+			 be32_to_cpu(k1->alloc.ar_startblock) <
+			 be32_to_cpu(k2->alloc.ar_startblock));
+	}
+}
+
+STATIC int
+xfs_allocbt_recs_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*r1,
+	union xfs_btree_rec	*r2)
+{
+	if (cur->bc_btnum == XFS_BTNUM_BNO) {
+		return be32_to_cpu(r1->alloc.ar_startblock) +
+			be32_to_cpu(r1->alloc.ar_blockcount) <=
+			be32_to_cpu(r2->alloc.ar_startblock);
+	} else {
+		return be32_to_cpu(r1->alloc.ar_blockcount) <
+			be32_to_cpu(r2->alloc.ar_blockcount) ||
+			(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
+			 be32_to_cpu(r1->alloc.ar_startblock) <
+			 be32_to_cpu(r2->alloc.ar_startblock));
+	}
+}
+#endif	/* DEBUG */
+
 #ifdef XFS_BTREE_TRACE
 ktrace_t	*xfs_allocbt_trace_buf;
 
@@ -395,6 +434,11 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
 	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
 	.key_diff		= xfs_allocbt_key_diff,
 
+#ifdef DEBUG
+	.keys_inorder		= xfs_allocbt_keys_inorder,
+	.recs_inorder		= xfs_allocbt_recs_inorder,
+#endif
+
 #ifdef XFS_BTREE_TRACE
 	.trace_enter		= xfs_allocbt_trace_enter,
 	.trace_cursor		= xfs_allocbt_trace_cursor,
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 5cceb8d..b7f99d7 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -6195,7 +6195,8 @@ xfs_check_block(
 		}
 		if (prevp) {
-			xfs_btree_check_key(XFS_BTNUM_BMAP, prevp, keyp);
+			ASSERT(be64_to_cpu(prevp->br_startoff) <
+			       be64_to_cpu(keyp->br_startoff));
 		}
 		prevp = keyp;
 
@@ -6338,11 +6339,15 @@ xfs_bmap_check_leaf_extents(
 
 		ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
 		if (i) {
-			xfs_btree_check_rec(XFS_BTNUM_BMAP, &last, ep);
+			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
+			       xfs_bmbt_disk_get_blockcount(&last) <=
+			       xfs_bmbt_disk_get_startoff(ep));
 		}
 		for (j = 1; j < num_recs; j++) {
 			nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1);
-			xfs_btree_check_rec(XFS_BTNUM_BMAP, ep, nextp);
+			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
+			       xfs_bmbt_disk_get_blockcount(ep) <=
+			       xfs_bmbt_disk_get_startoff(nextp));
 			ep = nextp;
 		}
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 7a02d39..c5eeb32 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -699,6 +699,29 @@ xfs_bmbt_key_diff(
 				      cur->bc_rec.b.br_startoff;
 }
 
+#ifdef DEBUG
+STATIC int
+xfs_bmbt_keys_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*k1,
+	union xfs_btree_key	*k2)
+{
+	return be64_to_cpu(k1->bmbt.br_startoff) <
+		be64_to_cpu(k2->bmbt.br_startoff);
+}
+
+STATIC int
+xfs_bmbt_recs_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*r1,
+	union xfs_btree_rec	*r2)
+{
+	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
+		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
+		xfs_bmbt_disk_get_startoff(&r2->bmbt);
+}
+#endif	/* DEBUG */
+
 #ifdef XFS_BTREE_TRACE
 ktrace_t	*xfs_bmbt_trace_buf;
 
@@ -801,6 +824,11 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
 	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
 	.key_diff		= xfs_bmbt_key_diff,
 
+#ifdef DEBUG
+	.keys_inorder		= xfs_bmbt_keys_inorder,
+	.recs_inorder		= xfs_bmbt_recs_inorder,
+#endif
+
 #ifdef XFS_BTREE_TRACE
 	.trace_enter		= xfs_bmbt_trace_enter,
 	.trace_cursor		= xfs_bmbt_trace_cursor,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 88eb00b..d667d30 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -52,122 +52,6 @@ const __uint32_t xfs_magics[XFS_BTNUM_MAX] = {
 	XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC
 };
 
-/*
- * External routines.
- */
-
-#ifdef DEBUG
-/*
- * Debug routine: check that keys are in the right order.
- */
-void
-xfs_btree_check_key(
-	xfs_btnum_t	btnum,		/* btree identifier */
-	void		*ak1,		/* pointer to left (lower) key */
-	void		*ak2)		/* pointer to right (higher) key */
-{
-	switch (btnum) {
-	case XFS_BTNUM_BNO: {
-		xfs_alloc_key_t	*k1;
-		xfs_alloc_key_t	*k2;
-
-		k1 = ak1;
-		k2 = ak2;
-		ASSERT(be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock));
-		break;
-	}
-	case XFS_BTNUM_CNT: {
-		xfs_alloc_key_t	*k1;
-		xfs_alloc_key_t	*k2;
-
-		k1 = ak1;
-		k2 = ak2;
-		ASSERT(be32_to_cpu(k1->ar_blockcount) < be32_to_cpu(k2->ar_blockcount) ||
-		       (k1->ar_blockcount == k2->ar_blockcount &&
-			be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock)));
-		break;
-	}
-	case XFS_BTNUM_BMAP: {
-		xfs_bmbt_key_t	*k1;
-		xfs_bmbt_key_t	*k2;
-
-		k1 = ak1;
-		k2 = ak2;
-		ASSERT(be64_to_cpu(k1->br_startoff) < be64_to_cpu(k2->br_startoff));
-		break;
-	}
-	case XFS_BTNUM_INO: {
-		xfs_inobt_key_t	*k1;
-		xfs_inobt_key_t	*k2;
-
-		k1 = ak1;
-		k2 = ak2;
-		ASSERT(be32_to_cpu(k1->ir_startino) < be32_to_cpu(k2->ir_startino));
-		break;
-	}
-	default:
-		ASSERT(0);
-	}
-}
-
-/*
- * Debug routine: check that records are in the right order.
- */
-void
-xfs_btree_check_rec(
-	xfs_btnum_t	btnum,		/* btree identifier */
-	void		*ar1,		/* pointer to left (lower) record */
-	void		*ar2)		/* pointer to right (higher) record */
-{
-	switch (btnum) {
-	case XFS_BTNUM_BNO: {
-		xfs_alloc_rec_t	*r1;
-		xfs_alloc_rec_t	*r2;
-
-		r1 = ar1;
-		r2 = ar2;
-		ASSERT(be32_to_cpu(r1->ar_startblock) +
-		       be32_to_cpu(r1->ar_blockcount) <=
-		       be32_to_cpu(r2->ar_startblock));
-		break;
-	}
-	case XFS_BTNUM_CNT: {
-		xfs_alloc_rec_t	*r1;
-		xfs_alloc_rec_t	*r2;
-
-		r1 = ar1;
-		r2 = ar2;
-		ASSERT(be32_to_cpu(r1->ar_blockcount) < be32_to_cpu(r2->ar_blockcount) ||
-		       (r1->ar_blockcount == r2->ar_blockcount &&
-			be32_to_cpu(r1->ar_startblock) < be32_to_cpu(r2->ar_startblock)));
-		break;
-	}
-	case XFS_BTNUM_BMAP: {
-		xfs_bmbt_rec_t	*r1;
-		xfs_bmbt_rec_t	*r2;
-
-		r1 = ar1;
-		r2 = ar2;
-		ASSERT(xfs_bmbt_disk_get_startoff(r1) +
-		       xfs_bmbt_disk_get_blockcount(r1) <=
-		       xfs_bmbt_disk_get_startoff(r2));
-		break;
-	}
-	case XFS_BTNUM_INO: {
-		xfs_inobt_rec_t	*r1;
-		xfs_inobt_rec_t	*r2;
-
-		r1 = ar1;
-		r2 = ar2;
-		ASSERT(be32_to_cpu(r1->ir_startino) + XFS_INODES_PER_CHUNK <=
-		       be32_to_cpu(r2->ir_startino));
-		break;
-	}
-	default:
-		ASSERT(0);
-	}
-}
-#endif	/* DEBUG */
 
 int					/* error (0 or EFSCORRUPTED) */
 xfs_btree_check_lblock(
@@ -2032,9 +1916,8 @@ xfs_btree_lshift(
 		xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
 		xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
 
-		xfs_btree_check_key(cur->bc_btnum,
-				    xfs_btree_key_addr(cur, lrecs - 1, left),
-				    lkp);
+		ASSERT(cur->bc_ops->keys_inorder(cur,
+			xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
 	} else {
 		/* It's a leaf.  Move records.  */
 		union xfs_btree_rec	*lrp;	/* left record pointer */
@@ -2045,9 +1928,8 @@ xfs_btree_lshift(
 		xfs_btree_copy_recs(cur, lrp, rrp, 1);
 		xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
 
-		xfs_btree_check_rec(cur->bc_btnum,
-				    xfs_btree_rec_addr(cur, lrecs - 1, left),
-				    lrp);
+		ASSERT(cur->bc_ops->recs_inorder(cur,
+			xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
 	}
 
 	xfs_btree_set_numrecs(left, lrecs);
@@ -2222,8 +2104,8 @@ xfs_btree_rshift(
 		xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
 		xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
 
-		xfs_btree_check_key(cur->bc_btnum, rkp,
-				    xfs_btree_key_addr(cur, 2, right));
+		ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
+			xfs_btree_key_addr(cur, 2, right)));
 	} else {
 		/* It's a leaf. make a hole in the records */
 		union xfs_btree_rec	*lrp;
@@ -2241,8 +2123,8 @@ xfs_btree_rshift(
 		cur->bc_ops->init_key_from_rec(&key, rrp);
 		rkp = &key;
 
-		xfs_btree_check_rec(cur->bc_btnum, rrp,
-				    xfs_btree_rec_addr(cur, 2, right));
+		ASSERT(cur->bc_ops->recs_inorder(cur, rrp,
+			xfs_btree_rec_addr(cur, 2, right)));
 	}
 
 	/*
@@ -2849,11 +2731,11 @@ xfs_btree_insrec(
 	/* Check that the new entry is being inserted in the right place. */
 	if (ptr <= numrecs) {
 		if (level == 0) {
-			xfs_btree_check_rec(cur->bc_btnum, recp,
-					xfs_btree_rec_addr(cur, ptr, block));
+			ASSERT(cur->bc_ops->recs_inorder(cur, recp,
+				xfs_btree_rec_addr(cur, ptr, block)));
 		} else {
-			xfs_btree_check_key(cur->bc_btnum, &key,
-					xfs_btree_key_addr(cur, ptr, block));
+			ASSERT(cur->bc_ops->keys_inorder(cur, &key,
+				xfs_btree_key_addr(cur, ptr, block)));
 		}
 	}
 #endif
@@ -2923,8 +2805,8 @@ xfs_btree_insrec(
 		xfs_btree_log_keys(cur, bp, ptr, numrecs);
 #ifdef DEBUG
 		if (ptr < numrecs) {
-			xfs_btree_check_key(cur->bc_btnum, kp,
-				xfs_btree_key_addr(cur, ptr + 1, block));
+			ASSERT(cur->bc_ops->keys_inorder(cur, kp,
+				xfs_btree_key_addr(cur, ptr + 1, block)));
 		}
 #endif
 	} else {
@@ -2941,8 +2823,8 @@ xfs_btree_insrec(
 		xfs_btree_log_recs(cur, bp, ptr, numrecs);
 #ifdef DEBUG
 		if (ptr < numrecs) {
-			xfs_btree_check_rec(cur->bc_btnum, rp,
-				xfs_btree_rec_addr(cur, ptr + 1, block));
+			ASSERT(cur->bc_ops->recs_inorder(cur, rp,
+				xfs_btree_rec_addr(cur, ptr + 1, block)));
 		}
 #endif
 	}
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index d6cc71e..25a488d 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -229,6 +229,18 @@ struct xfs_btree_ops {
 	__int64_t (*key_diff)(struct xfs_btree_cur *cur,
 			      union xfs_btree_key *key);
 
+#ifdef DEBUG
+	/* check that k1 is lower than k2 */
+	int	(*keys_inorder)(struct xfs_btree_cur *cur,
+				union xfs_btree_key *k1,
+				union xfs_btree_key *k2);
+
+	/* check that r1 is lower than r2 */
+	int	(*recs_inorder)(struct xfs_btree_cur *cur,
+				union xfs_btree_rec *r1,
+				union xfs_btree_rec *r2);
+#endif
+
 	/* btree tracing */
 #ifdef XFS_BTREE_TRACE
 	void		(*trace_enter)(struct xfs_btree_cur *, const char *,
@@ -379,30 +391,6 @@ xfs_btree_check_ptr(
 	int			index,	/* offset from ptr to check */
 	int			level);	/* btree block level */
 
-#ifdef DEBUG
-
-/*
- * Debug routine: check that keys are in the right order.
- */
-void
-xfs_btree_check_key(
-	xfs_btnum_t		btnum,	/* btree identifier */
-	void			*ak1,	/* pointer to left (lower) key */
-	void			*ak2);	/* pointer to right (higher) key */
-
-/*
- * Debug routine: check that records are in the right order.
- */
-void
-xfs_btree_check_rec(
-	xfs_btnum_t		btnum,	/* btree identifier */
-	void			*ar1,	/* pointer to left (lower) record */
-	void			*ar2);	/* pointer to right (higher) record */
-#else
-#define	xfs_btree_check_key(a, b, c)
-#define	xfs_btree_check_rec(a, b, c)
-#endif	/* DEBUG */
-
 /*
  * Delete the btree cursor.
  */
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 9f4e33c..dcd4a95 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -219,6 +219,28 @@ xfs_inobt_kill_root(
 	return 0;
 }
 
+#ifdef DEBUG
+STATIC int
+xfs_inobt_keys_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*k1,
+	union xfs_btree_key	*k2)
+{
+	return be32_to_cpu(k1->inobt.ir_startino) <
+		be32_to_cpu(k2->inobt.ir_startino);
+}
+
+STATIC int
+xfs_inobt_recs_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*r1,
+	union xfs_btree_rec	*r2)
+{
+	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
+		be32_to_cpu(r2->inobt.ir_startino);
+}
+#endif	/* DEBUG */
+
 #ifdef XFS_BTREE_TRACE
 ktrace_t	*xfs_inobt_trace_buf;
 
@@ -302,6 +324,11 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
 	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
 	.key_diff		= xfs_inobt_key_diff,
 
+#ifdef DEBUG
+	.keys_inorder		= xfs_inobt_keys_inorder,
+	.recs_inorder		= xfs_inobt_recs_inorder,
+#endif
+
 #ifdef XFS_BTREE_TRACE
 	.trace_enter		= xfs_inobt_trace_enter,
 	.trace_cursor		= xfs_inobt_trace_cursor,