summary | refs | log | tree | commit | diff | stats
path: root/sys/kern
diff options
context:
space:
mode:
author: phk <phk@FreeBSD.org> 2000-03-16 08:51:55 +0000
committer: phk <phk@FreeBSD.org> 2000-03-16 08:51:55 +0000
commit: 6b3385b7739c4d2d08aab8eb870efd47663efe4b (patch)
tree: b6a76651fe554cb41f35772ffdbaad23da921695 /sys/kern
parent: 54ba9c4d1b2ccfc21ec86916fa24df94f359bdeb (diff)
download: FreeBSD-src-6b3385b7739c4d2d08aab8eb870efd47663efe4b.zip
download: FreeBSD-src-6b3385b7739c4d2d08aab8eb870efd47663efe4b.tar.gz
Eliminate the undocumented, experimental, non-delivering and highly
dangerous MAX_PERF option.
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_lock.c  | 16
-rw-r--r--  sys/kern/subr_blist.c |  2
-rw-r--r--  sys/kern/vfs_bio.c    | 26
-rw-r--r--  sys/kern/vfs_export.c |  6
-rw-r--r--  sys/kern/vfs_subr.c   |  6
5 files changed, 0 insertions, 56 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index b47ca55..50d186c 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -246,10 +246,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
/* fall into downgrade */
case LK_DOWNGRADE:
-#if !defined(MAX_PERF)
if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
panic("lockmgr: not holding exclusive lock");
-#endif
sharelock(lkp, lkp->lk_exclusivecount);
lkp->lk_exclusivecount = 0;
lkp->lk_flags &= ~LK_HAVE_EXCL;
@@ -281,10 +279,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
* after the upgrade). If we return an error, the file
* will always be unlocked.
*/
-#if !defined(MAX_PERF)
if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
panic("lockmgr: upgrade exclusive lock");
-#endif
shareunlock(lkp, 1);
COUNT(p, -1);
/*
@@ -310,10 +306,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
break;
lkp->lk_flags |= LK_HAVE_EXCL;
lkp->lk_lockholder = pid;
-#if !defined(MAX_PERF)
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
-#endif
lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
lkp->lk_filename = file;
@@ -338,10 +332,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
/*
* Recursive lock.
*/
-#if !defined(MAX_PERF)
if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
panic("lockmgr: locking against myself");
-#endif
if ((extflags & LK_CANRECURSE) != 0) {
lkp->lk_exclusivecount++;
COUNT(p, 1);
@@ -372,10 +364,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
break;
lkp->lk_flags |= LK_HAVE_EXCL;
lkp->lk_lockholder = pid;
-#if !defined(MAX_PERF)
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
-#endif
lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
lkp->lk_filename = file;
@@ -387,14 +377,12 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
case LK_RELEASE:
if (lkp->lk_exclusivecount != 0) {
-#if !defined(MAX_PERF)
if (lkp->lk_lockholder != pid &&
lkp->lk_lockholder != LK_KERNPROC) {
panic("lockmgr: pid %d, not %s %d unlocking",
pid, "exclusive lock holder",
lkp->lk_lockholder);
}
-#endif
if (lkp->lk_lockholder != LK_KERNPROC) {
COUNT(p, -1);
}
@@ -420,10 +408,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
* check for holding a shared lock, but at least we can
* check for an exclusive one.
*/
-#if !defined(MAX_PERF)
if (lkp->lk_lockholder == pid)
panic("lockmgr: draining against myself");
-#endif
error = acquiredrain(lkp, extflags);
if (error)
@@ -440,11 +426,9 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
break;
default:
-#if !defined(MAX_PERF)
simple_unlock(&lkp->lk_interlock);
panic("lockmgr: unknown locktype request %d",
flags & LK_TYPE_MASK);
-#endif
/* NOTREACHED */
}
if ((lkp->lk_flags & LK_WAITDRAIN) &&
diff --git a/sys/kern/subr_blist.c b/sys/kern/subr_blist.c
index cad453f..d706524 100644
--- a/sys/kern/subr_blist.c
+++ b/sys/kern/subr_blist.c
@@ -545,10 +545,8 @@ blst_meta_free(
if (scan->u.bmu_avail == radix)
return;
-#if !defined(MAX_PERF)
if (scan->u.bmu_avail > radix)
panic("blst_meta_free: freeing already free blocks (%d) %d/%d", count, scan->u.bmu_avail, radix);
-#endif
/*
* Break the free down into its components
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 5868604..a2e8d26 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -456,10 +456,8 @@ bremfree(struct buf * bp)
bp->b_qindex = QUEUE_NONE;
runningbufspace += bp->b_bufsize;
} else {
-#if !defined(MAX_PERF)
if (BUF_REFCNT(bp) <= 1)
panic("bremfree: removing a buffer not on a queue");
-#endif
}
/*
@@ -603,10 +601,8 @@ bwrite(struct buf * bp)
oldflags = bp->b_flags;
-#if !defined(MAX_PERF)
if (BUF_REFCNT(bp) == 0)
panic("bwrite: buffer is not busy???");
-#endif
s = splbio();
/*
* If a background write is already in progress, delay
@@ -751,10 +747,8 @@ vfs_backgroundwritedone(bp)
void
bdwrite(struct buf * bp)
{
-#if !defined(MAX_PERF)
if (BUF_REFCNT(bp) == 0)
panic("bdwrite: buffer is not busy");
-#endif
if (bp->b_flags & B_INVAL) {
brelse(bp);
@@ -1061,11 +1055,9 @@ brelse(struct buf * bp)
m = bp->b_pages[j];
if (m == bogus_page) {
m = vm_page_lookup(obj, poff + j);
-#if !defined(MAX_PERF)
if (!m) {
panic("brelse: page missing\n");
}
-#endif
bp->b_pages[j] = m;
}
}
@@ -1096,10 +1088,8 @@ brelse(struct buf * bp)
}
-#if !defined(MAX_PERF)
if (bp->b_qindex != QUEUE_NONE)
panic("brelse: free buffer onto another queue???");
-#endif
if (BUF_REFCNT(bp) > 1) {
/* Temporary panic to verify exclusive locking */
/* This panic goes away when we allow shared refs */
@@ -1229,10 +1219,8 @@ bqrelse(struct buf * bp)
KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
-#if !defined(MAX_PERF)
if (bp->b_qindex != QUEUE_NONE)
panic("bqrelse: free buffer onto another queue???");
-#endif
if (BUF_REFCNT(bp) > 1) {
/* do not release to free list */
panic("bqrelse: multiple refs");
@@ -2109,10 +2097,8 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
int s;
struct bufhashhdr *bh;
-#if !defined(MAX_PERF)
if (size > MAXBSIZE)
panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
-#endif
s = splbio();
loop:
@@ -2346,13 +2332,11 @@ allocbuf(struct buf *bp, int size)
int newbsize, mbsize;
int i;
-#if !defined(MAX_PERF)
if (BUF_REFCNT(bp) == 0)
panic("allocbuf: buffer not busy");
if (bp->b_kvasize < size)
panic("allocbuf: buffer too small");
-#endif
if ((bp->b_flags & B_VMIO) == 0) {
caddr_t origbuf;
@@ -2745,11 +2729,9 @@ biodone(register struct buf * bp)
KASSERT(bp->b_offset != NOOFFSET,
("biodone: no buffer offset"));
-#if !defined(MAX_PERF)
if (!obj) {
panic("biodone: no object");
}
-#endif
#if defined(VFS_BIO_DEBUG)
if (obj->paging_in_progress < bp->b_npages) {
printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
@@ -2811,15 +2793,12 @@ biodone(register struct buf * bp)
* have not set the page busy flag correctly!!!
*/
if (m->busy == 0) {
-#if !defined(MAX_PERF)
printf("biodone: page busy < 0, "
"pindex: %d, foff: 0x(%x,%x), "
"resid: %d, index: %d\n",
(int) m->pindex, (int)(foff >> 32),
(int) foff & 0xffffffff, resid, i);
-#endif
if (!vn_isdisk(vp, NULL))
-#if !defined(MAX_PERF)
printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
bp->b_vp->v_mount->mnt_stat.f_iosize,
(int) bp->b_lblkno,
@@ -2830,7 +2809,6 @@ biodone(register struct buf * bp)
bp->b_flags, bp->b_npages);
printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
m->valid, m->dirty, m->wire_count);
-#endif
panic("biodone: page busy < 0\n");
}
vm_page_io_finish(m);
@@ -2877,11 +2855,9 @@ vfs_unbusy_pages(struct buf * bp)
if (m == bogus_page) {
m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
-#if !defined(MAX_PERF)
if (!m) {
panic("vfs_unbusy_pages: page missing\n");
}
-#endif
bp->b_pages[i] = m;
pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
}
@@ -3185,12 +3161,10 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
p = bp->b_pages[index];
if (p && (index < bp->b_npages)) {
-#if !defined(MAX_PERF)
if (p->busy) {
printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
bp->b_blkno, bp->b_lblkno);
}
-#endif
bp->b_pages[index] = NULL;
pmap_kremove(pg);
vm_page_busy(p);
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index dc1ada5..8b28d38 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -1110,7 +1110,6 @@ pbrelvp(bp)
KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
-#if !defined(MAX_PERF)
/* XXX REMOVE ME */
if (bp->b_vnbufs.tqe_next != NULL) {
panic(
@@ -1119,7 +1118,6 @@ pbrelvp(bp)
(int)bp->b_flags
);
}
-#endif
bp->b_vp = (struct vnode *) 0;
bp->b_flags &= ~B_PAGING;
}
@@ -1129,14 +1127,12 @@ pbreassignbuf(bp, newvp)
struct buf *bp;
struct vnode *newvp;
{
-#if !defined(MAX_PERF)
if ((bp->b_flags & B_PAGING) == 0) {
panic(
"pbreassignbuf() on non phys bp %p",
bp
);
}
-#endif
bp->b_vp = newvp;
}
@@ -1160,14 +1156,12 @@ reassignbuf(bp, newvp)
}
++reassignbufcalls;
-#if !defined(MAX_PERF)
/*
* B_PAGING flagged buffers cannot be reassigned because their vp
* is not fully linked in.
*/
if (bp->b_flags & B_PAGING)
panic("cannot reassign paging buffer");
-#endif
s = splbio();
/*
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index dc1ada5..8b28d38 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1110,7 +1110,6 @@ pbrelvp(bp)
KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
-#if !defined(MAX_PERF)
/* XXX REMOVE ME */
if (bp->b_vnbufs.tqe_next != NULL) {
panic(
@@ -1119,7 +1118,6 @@ pbrelvp(bp)
(int)bp->b_flags
);
}
-#endif
bp->b_vp = (struct vnode *) 0;
bp->b_flags &= ~B_PAGING;
}
@@ -1129,14 +1127,12 @@ pbreassignbuf(bp, newvp)
struct buf *bp;
struct vnode *newvp;
{
-#if !defined(MAX_PERF)
if ((bp->b_flags & B_PAGING) == 0) {
panic(
"pbreassignbuf() on non phys bp %p",
bp
);
}
-#endif
bp->b_vp = newvp;
}
@@ -1160,14 +1156,12 @@ reassignbuf(bp, newvp)
}
++reassignbufcalls;
-#if !defined(MAX_PERF)
/*
* B_PAGING flagged buffers cannot be reassigned because their vp
* is not fully linked in.
*/
if (bp->b_flags & B_PAGING)
panic("cannot reassign paging buffer");
-#endif
s = splbio();
/*
OpenPOWER on IntegriCloud