author     phk <phk@FreeBSD.org>    2000-03-16 08:51:55 +0000
committer  phk <phk@FreeBSD.org>    2000-03-16 08:51:55 +0000
commit     6b3385b7739c4d2d08aab8eb870efd47663efe4b (patch)
tree       b6a76651fe554cb41f35772ffdbaad23da921695
parent     54ba9c4d1b2ccfc21ec86916fa24df94f359bdeb (diff)
Eliminate the undocumented, experimental, non-delivering and highly
dangerous MAX_PERF option.
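The removal is mechanical: throughout the tree, consistency checks of the
form below were wrapped in #if !defined(MAX_PERF) guards, so a kernel built
with that symbol defined (necessarily by hand, since the option was never
documented) silently compiled its sanity panics out. A minimal before/after
sketch of the pattern, using the pmap_new_proc() check from the diff below;
the framing is editorial, not part of the commit:

    /* Before: defining MAX_PERF compiled the check away entirely. */
    #if !defined(MAX_PERF)
            if (up == NULL)
                    panic("pmap_new_proc: u_map allocation failed");
    #endif

    /* After this commit: the check is always built in. */
            if (up == NULL)
                    panic("pmap_new_proc: u_map allocation failed");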
 sys/alpha/alpha/pmap.c         | 14 --------------
 sys/amd64/amd64/pmap.c         | 34 ----------------------------------
 sys/fs/specfs/spec_vnops.c     |  2 --
 sys/i386/i386/pmap.c           | 34 ----------------------------------
 sys/kern/kern_lock.c           | 16 ----------------
 sys/kern/subr_blist.c          |  2 --
 sys/kern/vfs_bio.c             | 26 --------------------------
 sys/kern/vfs_export.c          |  6 ------
 sys/kern/vfs_subr.c            |  6 ------
 sys/miscfs/devfs/devfs_vnops.c |  2 --
 sys/miscfs/specfs/spec_vnops.c |  2 --
 sys/vm/swap_pager.c            | 10 ----------
 sys/vm/vm_object.c             |  4 ----
 sys/vm/vm_page.c               | 16 ----------------
 14 files changed, 0 insertions(+), 174 deletions(-)
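Every one of the 174 deleted lines is a guard line itself (#if
!defined(MAX_PERF), #ifndef MAX_PERF, or the matching #endif); the panic()
and printf() diagnostics between them all survive and are now compiled
unconditionally. The tree's remaining mechanism for optional consistency
checks is KASSERT(9), visible in the vfs_bio.c hunks below, which defaults
the opposite way: compiled out unless the kernel is built with "options
INVARIANTS". A simplified sketch of that idiom, condensed from the
sys/systm.h of the era (the exact macro text there may differ):

    #ifdef INVARIANTS
    #define KASSERT(exp, msg)    do { if (!(exp)) panic msg; } while (0)
    #else
    #define KASSERT(exp, msg)
    #endif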
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 89c8c6d..a7bc944 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -968,10 +968,8 @@ pmap_new_proc(struct proc *p)
     if ((up = p->p_addr) == NULL) {
         up = (struct user *) kmem_alloc_nofault(kernel_map,
                 UPAGES * PAGE_SIZE);
-#if !defined(MAX_PERF)
         if (up == NULL)
             panic("pmap_new_proc: u_map allocation failed");
-#endif
         p->p_addr = up;
     }
@@ -1088,10 +1086,8 @@ pmap_swapin_proc(p)
         if (m->valid != VM_PAGE_BITS_ALL) {
             rv = vm_pager_get_pages(upobj, &m, 1, 0);
-#if !defined(MAX_PERF)
             if (rv != VM_PAGER_OK)
                 panic("pmap_swapin_proc: cannot get upages for proc: %d\n",
                     p->p_pid);
-#endif
             m = vm_page_lookup(upobj, i);
             m->valid = VM_PAGE_BITS_ALL;
         }
@@ -1581,10 +1577,8 @@ pmap_growkernel(vm_offset_t addr)
             int pindex = NKLEV3MAPS + pmap_lev1_index(kernel_vm_end) - K1SEGLEV1I;
 
             nkpg = vm_page_alloc(kptobj, pindex, VM_ALLOC_SYSTEM);
-#if !defined(MAX_PERF)
             if (!nkpg)
                 panic("pmap_growkernel: no memory to grow kernel");
-#endif
 
             printf("pmap_growkernel: growing to %lx\n", addr);
             printf("pmap_growkernel: adding new level2 page table\n");
@@ -1619,10 +1613,8 @@ pmap_growkernel(vm_offset_t addr)
          * This index is bogus, but out of the way
          */
         nkpg = vm_page_alloc(kptobj, nklev3, VM_ALLOC_SYSTEM);
-#if !defined(MAX_PERF)
         if (!nkpg)
             panic("pmap_growkernel: no memory to grow kernel");
-#endif
 
         nklev3++;
@@ -1653,9 +1645,7 @@ pmap_destroy(pmap_t pmap)
     count = --pmap->pm_count;
     if (count == 0) {
         pmap_release(pmap);
-#if !defined(MAX_PERF)
         panic("destroying a pmap is not yet implemented");
-#endif
     }
 }
@@ -2078,14 +2068,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
 
     pte = pmap_lev3pte(pmap, va);
 
-#if !defined(MAX_PERF)
     /*
      * Page Directory table entry not valid, we need a new PT page
      */
     if (pte == NULL) {
         panic("pmap_enter: invalid kernel page tables pmap=%p, va=0x%lx\n",
             pmap, va);
     }
-#endif
 
     origpte = *pte;
     pa &= ~PAGE_MASK;
@@ -2123,10 +2111,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
     if (opa) {
         int err;
         err = pmap_remove_pte(pmap, pte, va);
-#if !defined(MAX_PERF)
         if (err)
             panic("pmap_enter: pte vanished, va: 0x%lx", va);
-#endif
     }
 
     /*
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 80a99f5..6f87229 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -890,10 +890,8 @@ pmap_new_proc(p)
     if ((up = p->p_addr) == NULL) {
         up = (struct user *) kmem_alloc_nofault(kernel_map,
                 UPAGES * PAGE_SIZE);
-#if !defined(MAX_PERF)
         if (up == NULL)
             panic("pmap_new_proc: u_map allocation failed");
-#endif
         p->p_addr = up;
     }
@@ -1015,10 +1013,8 @@ pmap_swapin_proc(p)
         if (m->valid != VM_PAGE_BITS_ALL) {
             rv = vm_pager_get_pages(upobj, &m, 1, 0);
-#if !defined(MAX_PERF)
             if (rv != VM_PAGER_OK)
                 panic("pmap_swapin_proc: cannot get upages for proc: %d\n",
                     p->p_pid);
-#endif
             m = vm_page_lookup(upobj, i);
             m->valid = VM_PAGE_BITS_ALL;
         }
@@ -1222,11 +1218,9 @@ pmap_release_free_page(pmap, p)
     pde[p->pindex] = 0;
     pmap->pm_stats.resident_count--;
-#if !defined(MAX_PERF)
     if (p->hold_count)  {
         panic("pmap_release: freeing held page table page");
     }
-#endif
     /*
      * Page directory pages need to have the kernel
      * stuff cleared, so they can go into the zero queue also.
@@ -1448,10 +1442,8 @@ pmap_growkernel(vm_offset_t addr)
          * This index is bogus, but out of the way
          */
         nkpg = vm_page_alloc(kptobj, nkpt, VM_ALLOC_SYSTEM);
-#if !defined(MAX_PERF)
         if (!nkpg)
             panic("pmap_growkernel: no memory to grow kernel");
-#endif
 
         nkpt++;
@@ -1490,9 +1482,7 @@ pmap_destroy(pmap)
     count = --pmap->pm_count;
     if (count == 0) {
         pmap_release(pmap);
-#if !defined(MAX_PERF)
         panic("destroying a pmap is not yet implemented");
-#endif
     }
 }
@@ -2058,7 +2048,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
 
     pte = pmap_pte(pmap, va);
 
-#if !defined(MAX_PERF)
     /*
      * Page Directory table entry not valid, we need a new PT page
      */
@@ -2066,16 +2055,13 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
         panic("pmap_enter: invalid page directory, pdir=%p, va=0x%x\n",
             (void *)pmap->pm_pdir[PTDPTDI], va);
     }
-#endif
 
     origpte = *(vm_offset_t *)pte;
     pa &= PG_FRAME;
     opa = origpte & PG_FRAME;
 
-#if !defined(MAX_PERF)
     if (origpte & PG_PS)
         panic("pmap_enter: attempted pmap_enter on 4MB page");
-#endif
 
     /*
      * Mapping has not changed, must be protection or wiring change.
@@ -2141,10 +2127,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
     if (opa) {
         int err;
         err = pmap_remove_pte(pmap, pte, va);
-#if !defined(MAX_PERF)
         if (err)
             panic("pmap_enter: pte vanished, va: 0x%x", va);
-#endif
     }
 
     /*
@@ -2241,10 +2225,8 @@ retry:
      * the hold count, and activate it.
      */
     if (ptepa) {
-#if !defined(MAX_PERF)
         if (ptepa & PG_PS)
             panic("pmap_enter_quick: unexpected mapping into 4MB page");
-#endif
         if (pmap->pm_ptphint &&
             (pmap->pm_ptphint->pindex == ptepindex)) {
             mpte = pmap->pm_ptphint;
@@ -2619,10 +2601,8 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
         vm_offset_t srcptepaddr;
         unsigned ptepindex;
 
-#if !defined(MAX_PERF)
         if (addr >= UPT_MIN_ADDRESS)
             panic("pmap_copy: invalid to pmap_copy page tables\n");
-#endif
 
         /*
          * Don't let optional prefaulting of pages make us go
@@ -2715,10 +2695,8 @@ pmap_zero_page(phys)
     vm_offset_t phys;
 {
 #ifdef SMP
-#if !defined(MAX_PERF)
     if (*(int *) prv_CMAP3)
         panic("pmap_zero_page: prv_CMAP3 busy");
-#endif
 
     *(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
     cpu_invlpg(prv_CADDR3);
@@ -2732,10 +2710,8 @@ pmap_zero_page(phys)
 
     *(int *) prv_CMAP3 = 0;
 #else
-#if !defined(MAX_PERF)
     if (*(int *) CMAP2)
         panic("pmap_zero_page: CMAP2 busy");
-#endif
 
     *(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
     invltlb_1pg((vm_offset_t)CADDR2);
@@ -2763,10 +2739,8 @@ pmap_zero_page_area(phys, off, size)
     int size;
 {
 #ifdef SMP
-#if !defined(MAX_PERF)
     if (*(int *) prv_CMAP3)
         panic("pmap_zero_page: prv_CMAP3 busy");
-#endif
 
     *(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
     cpu_invlpg(prv_CADDR3);
@@ -2780,10 +2754,8 @@ pmap_zero_page_area(phys, off, size)
 
     *(int *) prv_CMAP3 = 0;
 #else
-#if !defined(MAX_PERF)
     if (*(int *) CMAP2)
         panic("pmap_zero_page: CMAP2 busy");
-#endif
 
     *(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
     invltlb_1pg((vm_offset_t)CADDR2);
@@ -2810,12 +2782,10 @@ pmap_copy_page(src, dst)
     vm_offset_t dst;
 {
 #ifdef SMP
-#if !defined(MAX_PERF)
     if (*(int *) prv_CMAP1)
         panic("pmap_copy_page: prv_CMAP1 busy");
     if (*(int *) prv_CMAP2)
         panic("pmap_copy_page: prv_CMAP2 busy");
-#endif
 
     *(int *) prv_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
     *(int *) prv_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
@@ -2828,10 +2798,8 @@ pmap_copy_page(src, dst)
     *(int *) prv_CMAP1 = 0;
     *(int *) prv_CMAP2 = 0;
 #else
-#if !defined(MAX_PERF)
     if (*(int *) CMAP1 || *(int *) CMAP2)
         panic("pmap_copy_page: CMAP busy");
-#endif
 
     *(int *) CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
     *(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
@@ -3286,10 +3254,8 @@ pmap_mapdev(pa, size)
     size = roundup(offset + size, PAGE_SIZE);
 
     va = kmem_alloc_pageable(kernel_map, size);
-#if !defined(MAX_PERF)
     if (!va)
         panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
-#endif
 
     pa = pa & PG_FRAME;
     for (tmpva = va; size > 0;) {
diff --git a/sys/fs/specfs/spec_vnops.c b/sys/fs/specfs/spec_vnops.c
index 9641897..a9a711e 100644
--- a/sys/fs/specfs/spec_vnops.c
+++ b/sys/fs/specfs/spec_vnops.c
@@ -756,7 +756,6 @@ spec_getpages(ap)
     }
     if (!gotreqpage) {
         m = ap->a_m[ap->a_reqpage];
-#ifndef MAX_PERF
         printf(
     "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
             devtoname(bp->b_dev), error, bp, bp->b_vp);
@@ -766,7 +765,6 @@ spec_getpages(ap)
         printf(
     " nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
             nread, ap->a_reqpage, (u_long)m->pindex, pcount);
-#endif
         /*
          * Free the buffer header back to the swap buffer pool.
          */
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 80a99f5..6f87229 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -890,10 +890,8 @@ pmap_new_proc(p)
     if ((up = p->p_addr) == NULL) {
         up = (struct user *) kmem_alloc_nofault(kernel_map,
                 UPAGES * PAGE_SIZE);
-#if !defined(MAX_PERF)
         if (up == NULL)
             panic("pmap_new_proc: u_map allocation failed");
-#endif
         p->p_addr = up;
     }
@@ -1015,10 +1013,8 @@ pmap_swapin_proc(p)
         if (m->valid != VM_PAGE_BITS_ALL) {
             rv = vm_pager_get_pages(upobj, &m, 1, 0);
-#if !defined(MAX_PERF)
             if (rv != VM_PAGER_OK)
                 panic("pmap_swapin_proc: cannot get upages for proc: %d\n",
                     p->p_pid);
-#endif
             m = vm_page_lookup(upobj, i);
             m->valid = VM_PAGE_BITS_ALL;
         }
@@ -1222,11 +1218,9 @@ pmap_release_free_page(pmap, p)
     pde[p->pindex] = 0;
     pmap->pm_stats.resident_count--;
-#if !defined(MAX_PERF)
     if (p->hold_count)  {
         panic("pmap_release: freeing held page table page");
     }
-#endif
     /*
      * Page directory pages need to have the kernel
      * stuff cleared, so they can go into the zero queue also.
@@ -1448,10 +1442,8 @@ pmap_growkernel(vm_offset_t addr)
          * This index is bogus, but out of the way
          */
         nkpg = vm_page_alloc(kptobj, nkpt, VM_ALLOC_SYSTEM);
-#if !defined(MAX_PERF)
         if (!nkpg)
             panic("pmap_growkernel: no memory to grow kernel");
-#endif
 
         nkpt++;
@@ -1490,9 +1482,7 @@ pmap_destroy(pmap)
     count = --pmap->pm_count;
     if (count == 0) {
         pmap_release(pmap);
-#if !defined(MAX_PERF)
         panic("destroying a pmap is not yet implemented");
-#endif
     }
 }
@@ -2058,7 +2048,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
 
     pte = pmap_pte(pmap, va);
 
-#if !defined(MAX_PERF)
     /*
      * Page Directory table entry not valid, we need a new PT page
      */
@@ -2066,16 +2055,13 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
         panic("pmap_enter: invalid page directory, pdir=%p, va=0x%x\n",
             (void *)pmap->pm_pdir[PTDPTDI], va);
     }
-#endif
 
     origpte = *(vm_offset_t *)pte;
     pa &= PG_FRAME;
     opa = origpte & PG_FRAME;
 
-#if !defined(MAX_PERF)
     if (origpte & PG_PS)
         panic("pmap_enter: attempted pmap_enter on 4MB page");
-#endif
 
     /*
      * Mapping has not changed, must be protection or wiring change.
@@ -2141,10 +2127,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
     if (opa) {
         int err;
         err = pmap_remove_pte(pmap, pte, va);
-#if !defined(MAX_PERF)
         if (err)
             panic("pmap_enter: pte vanished, va: 0x%x", va);
-#endif
     }
 
     /*
@@ -2241,10 +2225,8 @@ retry:
      * the hold count, and activate it.
      */
     if (ptepa) {
-#if !defined(MAX_PERF)
         if (ptepa & PG_PS)
             panic("pmap_enter_quick: unexpected mapping into 4MB page");
-#endif
         if (pmap->pm_ptphint &&
             (pmap->pm_ptphint->pindex == ptepindex)) {
             mpte = pmap->pm_ptphint;
@@ -2619,10 +2601,8 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
         vm_offset_t srcptepaddr;
         unsigned ptepindex;
 
-#if !defined(MAX_PERF)
         if (addr >= UPT_MIN_ADDRESS)
             panic("pmap_copy: invalid to pmap_copy page tables\n");
-#endif
 
         /*
         * Don't let optional prefaulting of pages make us go
@@ -2715,10 +2695,8 @@ pmap_zero_page(phys)
     vm_offset_t phys;
 {
 #ifdef SMP
-#if !defined(MAX_PERF)
     if (*(int *) prv_CMAP3)
         panic("pmap_zero_page: prv_CMAP3 busy");
-#endif
 
     *(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
     cpu_invlpg(prv_CADDR3);
@@ -2732,10 +2710,8 @@ pmap_zero_page(phys)
 
     *(int *) prv_CMAP3 = 0;
 #else
-#if !defined(MAX_PERF)
     if (*(int *) CMAP2)
         panic("pmap_zero_page: CMAP2 busy");
-#endif
 
     *(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
     invltlb_1pg((vm_offset_t)CADDR2);
@@ -2763,10 +2739,8 @@ pmap_zero_page_area(phys, off, size)
     int size;
 {
 #ifdef SMP
-#if !defined(MAX_PERF)
     if (*(int *) prv_CMAP3)
         panic("pmap_zero_page: prv_CMAP3 busy");
-#endif
 
     *(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
     cpu_invlpg(prv_CADDR3);
@@ -2780,10 +2754,8 @@ pmap_zero_page_area(phys, off, size)
 
     *(int *) prv_CMAP3 = 0;
 #else
-#if !defined(MAX_PERF)
     if (*(int *) CMAP2)
         panic("pmap_zero_page: CMAP2 busy");
-#endif
 
     *(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
     invltlb_1pg((vm_offset_t)CADDR2);
@@ -2810,12 +2782,10 @@ pmap_copy_page(src, dst)
     vm_offset_t dst;
 {
 #ifdef SMP
-#if !defined(MAX_PERF)
     if (*(int *) prv_CMAP1)
         panic("pmap_copy_page: prv_CMAP1 busy");
     if (*(int *) prv_CMAP2)
         panic("pmap_copy_page: prv_CMAP2 busy");
-#endif
 
     *(int *) prv_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
     *(int *) prv_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
@@ -2828,10 +2798,8 @@ pmap_copy_page(src, dst)
     *(int *) prv_CMAP1 = 0;
     *(int *) prv_CMAP2 = 0;
 #else
-#if !defined(MAX_PERF)
     if (*(int *) CMAP1 || *(int *) CMAP2)
         panic("pmap_copy_page: CMAP busy");
-#endif
 
     *(int *) CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
     *(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
@@ -3286,10 +3254,8 @@ pmap_mapdev(pa, size)
     size = roundup(offset + size, PAGE_SIZE);
 
     va = kmem_alloc_pageable(kernel_map, size);
-#if !defined(MAX_PERF)
     if (!va)
         panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
-#endif
 
     pa = pa & PG_FRAME;
     for (tmpva = va; size > 0;) {
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index b47ca55..50d186c 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -246,10 +246,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
         /* fall into downgrade */
 
     case LK_DOWNGRADE:
-#if !defined(MAX_PERF)
         if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
             panic("lockmgr: not holding exclusive lock");
-#endif
         sharelock(lkp, lkp->lk_exclusivecount);
         lkp->lk_exclusivecount = 0;
         lkp->lk_flags &= ~LK_HAVE_EXCL;
@@ -281,10 +279,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
          * after the upgrade). If we return an error, the file
          * will always be unlocked.
          */
-#if !defined(MAX_PERF)
        if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
             panic("lockmgr: upgrade exclusive lock");
-#endif
         shareunlock(lkp, 1);
         COUNT(p, -1);
         /*
@@ -310,10 +306,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
                 break;
             lkp->lk_flags |= LK_HAVE_EXCL;
             lkp->lk_lockholder = pid;
-#if !defined(MAX_PERF)
             if (lkp->lk_exclusivecount != 0)
                 panic("lockmgr: non-zero exclusive count");
-#endif
             lkp->lk_exclusivecount = 1;
 #if defined(DEBUG_LOCKS)
             lkp->lk_filename = file;
@@ -338,10 +332,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
             /*
              * Recursive lock.
              */
-#if !defined(MAX_PERF)
             if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
                 panic("lockmgr: locking against myself");
-#endif
             if ((extflags & LK_CANRECURSE) != 0) {
                 lkp->lk_exclusivecount++;
                 COUNT(p, 1);
@@ -372,10 +364,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
             break;
         lkp->lk_flags |= LK_HAVE_EXCL;
         lkp->lk_lockholder = pid;
-#if !defined(MAX_PERF)
         if (lkp->lk_exclusivecount != 0)
             panic("lockmgr: non-zero exclusive count");
-#endif
         lkp->lk_exclusivecount = 1;
 #if defined(DEBUG_LOCKS)
         lkp->lk_filename = file;
@@ -387,14 +377,12 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
 
     case LK_RELEASE:
         if (lkp->lk_exclusivecount != 0) {
-#if !defined(MAX_PERF)
             if (lkp->lk_lockholder != pid &&
                 lkp->lk_lockholder != LK_KERNPROC) {
                 panic("lockmgr: pid %d, not %s %d unlocking",
                     pid, "exclusive lock holder",
                     lkp->lk_lockholder);
             }
-#endif
             if (lkp->lk_lockholder != LK_KERNPROC) {
                 COUNT(p, -1);
             }
@@ -420,10 +408,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
          * check for holding a shared lock, but at least we can
          * check for an exclusive one.
          */
-#if !defined(MAX_PERF)
         if (lkp->lk_lockholder == pid)
             panic("lockmgr: draining against myself");
-#endif
 
         error = acquiredrain(lkp, extflags);
         if (error)
@@ -440,11 +426,9 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
         break;
 
     default:
-#if !defined(MAX_PERF)
         simple_unlock(&lkp->lk_interlock);
         panic("lockmgr: unknown locktype request %d",
             flags & LK_TYPE_MASK);
-#endif
         /* NOTREACHED */
     }
     if ((lkp->lk_flags & LK_WAITDRAIN) &&
diff --git a/sys/kern/subr_blist.c b/sys/kern/subr_blist.c
index cad453f..d706524 100644
--- a/sys/kern/subr_blist.c
+++ b/sys/kern/subr_blist.c
@@ -545,10 +545,8 @@ blst_meta_free(
 
     if (scan->u.bmu_avail == radix)
         return;
-#if !defined(MAX_PERF)
     if (scan->u.bmu_avail > radix)
         panic("blst_meta_free: freeing already free blocks (%d) %d/%d",
             count, scan->u.bmu_avail, radix);
-#endif
 
     /*
      * Break the free down into its components
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 5868604..a2e8d26 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -456,10 +456,8 @@ bremfree(struct buf * bp)
         bp->b_qindex = QUEUE_NONE;
         runningbufspace += bp->b_bufsize;
     } else {
-#if !defined(MAX_PERF)
         if (BUF_REFCNT(bp) <= 1)
             panic("bremfree: removing a buffer not on a queue");
-#endif
     }
 
     /*
@@ -603,10 +601,8 @@ bwrite(struct buf * bp)
 
     oldflags = bp->b_flags;
 
-#if !defined(MAX_PERF)
     if (BUF_REFCNT(bp) == 0)
         panic("bwrite: buffer is not busy???");
-#endif
     s = splbio();
     /*
      * If a background write is already in progress, delay
@@ -751,10 +747,8 @@ vfs_backgroundwritedone(bp)
 void
 bdwrite(struct buf * bp)
 {
-#if !defined(MAX_PERF)
     if (BUF_REFCNT(bp) == 0)
         panic("bdwrite: buffer is not busy");
-#endif
 
     if (bp->b_flags & B_INVAL) {
         brelse(bp);
@@ -1061,11 +1055,9 @@ brelse(struct buf * bp)
                 m = bp->b_pages[j];
                 if (m == bogus_page) {
                     m = vm_page_lookup(obj, poff + j);
-#if !defined(MAX_PERF)
                     if (!m) {
                         panic("brelse: page missing\n");
                     }
-#endif
                     bp->b_pages[j] = m;
                 }
             }
@@ -1096,10 +1088,8 @@ brelse(struct buf * bp)
 
     }
 
-#if !defined(MAX_PERF)
     if (bp->b_qindex != QUEUE_NONE)
         panic("brelse: free buffer onto another queue???");
-#endif
     if (BUF_REFCNT(bp) > 1) {
         /* Temporary panic to verify exclusive locking */
         /* This panic goes away when we allow shared refs */
@@ -1229,10 +1219,8 @@ bqrelse(struct buf * bp)
 
     KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 
-#if !defined(MAX_PERF)
     if (bp->b_qindex != QUEUE_NONE)
         panic("bqrelse: free buffer onto another queue???");
-#endif
     if (BUF_REFCNT(bp) > 1) {
         /* do not release to free list */
         panic("bqrelse: multiple refs");
@@ -2109,10 +2097,8 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
     int s;
     struct bufhashhdr *bh;
 
-#if !defined(MAX_PERF)
     if (size > MAXBSIZE)
         panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
-#endif
 
     s = splbio();
 loop:
@@ -2346,13 +2332,11 @@ allocbuf(struct buf *bp, int size)
     int newbsize, mbsize;
     int i;
 
-#if !defined(MAX_PERF)
     if (BUF_REFCNT(bp) == 0)
         panic("allocbuf: buffer not busy");
 
     if (bp->b_kvasize < size)
         panic("allocbuf: buffer too small");
-#endif
 
     if ((bp->b_flags & B_VMIO) == 0) {
         caddr_t origbuf;
@@ -2745,11 +2729,9 @@ biodone(register struct buf * bp)
 
         KASSERT(bp->b_offset != NOOFFSET,
             ("biodone: no buffer offset"));
 
-#if !defined(MAX_PERF)
         if (!obj) {
             panic("biodone: no object");
         }
-#endif
 #if defined(VFS_BIO_DEBUG)
         if (obj->paging_in_progress < bp->b_npages) {
             printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
@@ -2811,15 +2793,12 @@ biodone(register struct buf * bp)
              * have not set the page busy flag correctly!!!
              */
             if (m->busy == 0) {
-#if !defined(MAX_PERF)
                 printf("biodone: page busy < 0, "
                     "pindex: %d, foff: 0x(%x,%x), "
                    "resid: %d, index: %d\n",
                     (int) m->pindex, (int)(foff >> 32),
                     (int) foff & 0xffffffff, resid, i);
-#endif
                 if (!vn_isdisk(vp, NULL))
-#if !defined(MAX_PERF)
                     printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
                         bp->b_vp->v_mount->mnt_stat.f_iosize,
                         (int) bp->b_lblkno,
@@ -2830,7 +2809,6 @@ biodone(register struct buf * bp)
                         bp->b_flags, bp->b_npages);
                 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
                     m->valid, m->dirty, m->wire_count);
-#endif
                 panic("biodone: page busy < 0\n");
             }
             vm_page_io_finish(m);
@@ -2877,11 +2855,9 @@ vfs_unbusy_pages(struct buf * bp)
             if (m == bogus_page) {
                 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
-#if !defined(MAX_PERF)
                 if (!m) {
                     panic("vfs_unbusy_pages: page missing\n");
                 }
-#endif
                 bp->b_pages[i] = m;
                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
             }
@@ -3185,12 +3161,10 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
     for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
         p = bp->b_pages[index];
         if (p && (index < bp->b_npages)) {
-#if !defined(MAX_PERF)
             if (p->busy) {
                 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
                     bp->b_blkno, bp->b_lblkno);
             }
-#endif
             bp->b_pages[index] = NULL;
             pmap_kremove(pg);
             vm_page_busy(p);
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index dc1ada5..8b28d38 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -1110,7 +1110,6 @@ pbrelvp(bp)
 
     KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
 
-#if !defined(MAX_PERF)
     /* XXX REMOVE ME */
     if (bp->b_vnbufs.tqe_next != NULL) {
         panic(
@@ -1119,7 +1118,6 @@ pbrelvp(bp)
             (int)bp->b_flags
         );
     }
-#endif
     bp->b_vp = (struct vnode *) 0;
     bp->b_flags &= ~B_PAGING;
 }
@@ -1129,14 +1127,12 @@ pbreassignbuf(bp, newvp)
     struct buf *bp;
     struct vnode *newvp;
 {
-#if !defined(MAX_PERF)
     if ((bp->b_flags & B_PAGING) == 0) {
         panic(
             "pbreassignbuf() on non phys bp %p",
             bp
         );
     }
-#endif
     bp->b_vp = newvp;
 }
 
@@ -1160,14 +1156,12 @@ reassignbuf(bp, newvp)
     }
     ++reassignbufcalls;
 
-#if !defined(MAX_PERF)
     /*
      * B_PAGING flagged buffers cannot be reassigned because their vp
      * is not fully linked in.
      */
     if (bp->b_flags & B_PAGING)
         panic("cannot reassign paging buffer");
-#endif
 
     s = splbio();
     /*
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index dc1ada5..8b28d38 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1110,7 +1110,6 @@ pbrelvp(bp)
 
     KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
 
-#if !defined(MAX_PERF)
     /* XXX REMOVE ME */
     if (bp->b_vnbufs.tqe_next != NULL) {
         panic(
@@ -1119,7 +1118,6 @@ pbrelvp(bp)
             (int)bp->b_flags
         );
     }
-#endif
     bp->b_vp = (struct vnode *) 0;
     bp->b_flags &= ~B_PAGING;
 }
@@ -1129,14 +1127,12 @@ pbreassignbuf(bp, newvp)
     struct buf *bp;
     struct vnode *newvp;
 {
-#if !defined(MAX_PERF)
     if ((bp->b_flags & B_PAGING) == 0) {
         panic(
             "pbreassignbuf() on non phys bp %p",
             bp
         );
     }
-#endif
     bp->b_vp = newvp;
 }
 
@@ -1160,14 +1156,12 @@ reassignbuf(bp, newvp)
     }
     ++reassignbufcalls;
 
-#if !defined(MAX_PERF)
     /*
      * B_PAGING flagged buffers cannot be reassigned because their vp
      * is not fully linked in.
      */
     if (bp->b_flags & B_PAGING)
         panic("cannot reassign paging buffer");
-#endif
 
     s = splbio();
     /*
diff --git a/sys/miscfs/devfs/devfs_vnops.c b/sys/miscfs/devfs/devfs_vnops.c
index 51315c7..85351ba 100644
--- a/sys/miscfs/devfs/devfs_vnops.c
+++ b/sys/miscfs/devfs/devfs_vnops.c
@@ -1933,7 +1933,6 @@ devfs_getpages(struct vop_getpages_args *ap)
     }
     if (!gotreqpage) {
         m = ap->a_m[ap->a_reqpage];
-#ifndef MAX_PERF
         printf("devfs_getpages: I/O read failure: (error code=%d)\n", error);
         printf(" size: %d, resid:"
@@ -1942,7 +1941,6 @@ devfs_getpages(struct vop_getpages_args *ap)
         printf(" nread: %d, reqpage:"
             " %d, pindex: %d, pcount: %d\n",
             nread, ap->a_reqpage, m->pindex, pcount);
-#endif
         /*
          * Free the buffer header back to the swap buffer pool.
          */
diff --git a/sys/miscfs/specfs/spec_vnops.c b/sys/miscfs/specfs/spec_vnops.c
index 9641897..a9a711e 100644
--- a/sys/miscfs/specfs/spec_vnops.c
+++ b/sys/miscfs/specfs/spec_vnops.c
@@ -756,7 +756,6 @@ spec_getpages(ap)
     }
     if (!gotreqpage) {
         m = ap->a_m[ap->a_reqpage];
-#ifndef MAX_PERF
         printf(
     "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
             devtoname(bp->b_dev), error, bp, bp->b_vp);
@@ -766,7 +765,6 @@ spec_getpages(ap)
         printf(
     " nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
             nread, ap->a_reqpage, (u_long)m->pindex, pcount);
-#endif
         /*
          * Free the buffer header back to the swap buffer pool.
          */
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 82b2040..f7793cd 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1006,14 +1006,12 @@ swap_pager_getpages(object, m, count, reqpage)
 
     mreq = m[reqpage];
 
-#if !defined(MAX_PERF)
     if (mreq->object != object) {
         panic("swap_pager_getpages: object mismatch %p/%p",
             object,
             mreq->object
         );
     }
-#endif
     /*
      * Calculate range to retrieve.  The pages have already been assigned
      * their swapblks.  We require a *contiguous* range that falls entirely
@@ -1214,14 +1212,12 @@ swap_pager_putpages(object, m, count, sync, rtvals)
     int i;
     int n = 0;
 
-#if !defined(MAX_PERF)
     if (count && m[0]->object != object) {
         panic("swap_pager_getpages: object mismatch %p/%p",
             object,
             m[0]->object
         );
     }
-#endif
     /*
      * Step 1
      *
@@ -1870,25 +1866,19 @@ swp_pager_meta_free_all(vm_object_t object)
             for (i = 0; i < SWAP_META_PAGES; ++i) {
                 daddr_t v = swap->swb_pages[i];
                 if (v != SWAPBLK_NONE) {
-#if !defined(MAX_PERF)
                     --swap->swb_count;
-#endif
                     swp_pager_freeswapspace(v, 1);
                 }
             }
-#if !defined(MAX_PERF)
             if (swap->swb_count != 0)
                 panic("swap_pager_meta_free_all: swb_count != 0");
-#endif
             *pswap = swap->swb_hnext;
             zfree(swap_zone, swap);
             --object->un_pager.swp.swp_bcount;
         }
         index += SWAP_META_PAGES;
-#if !defined(MAX_PERF)
         if (index > 0x20000000)
             panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
-#endif
     }
 }
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 6da6c9b..23354bd 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -249,9 +249,7 @@ vm_object_reference(object)
     object->ref_count++;
     if (object->type == OBJT_VNODE) {
         while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
-#if !defined(MAX_PERF)
             printf("vm_object_reference: delay in getting object\n");
-#endif
         }
     }
 }
@@ -440,10 +438,8 @@ vm_object_terminate(object)
      */
     s = splvm();
     while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
-#if !defined(MAX_PERF)
         if (p->busy || (p->flags & PG_BUSY))
             panic("vm_object_terminate: freeing busy page %p\n", p);
-#endif
         if (p->wire_count == 0) {
             vm_page_busy(p);
             vm_page_free(p);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index edde291..08ccb8e 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -416,11 +416,9 @@ vm_page_remove(m)
     if (m->object == NULL)
         return;
 
-#if !defined(MAX_PERF)
     if ((m->flags & PG_BUSY) == 0) {
         panic("vm_page_remove: page not busy");
     }
-#endif
 
     /*
      * Basically destroy the page.
@@ -443,10 +441,8 @@ vm_page_remove(m)
     bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
     while (*bucket != m) {
-#if !defined(MAX_PERF)
         if (*bucket == NULL)
             panic("vm_page_remove(): page not found in hash");
-#endif
         bucket = &(*bucket)->hnext;
     }
     *bucket = m->hnext;
@@ -1055,7 +1051,6 @@ vm_page_free_toq(vm_page_t m)
 
     cnt.v_tfree++;
 
-#if !defined(MAX_PERF)
     if (m->busy || ((m->queue - m->pc) == PQ_FREE) ||
         (m->hold_count != 0)) {
         printf(
@@ -1067,7 +1062,6 @@ vm_page_free_toq(vm_page_t m)
         else
             panic("vm_page_free: freeing busy page");
     }
-#endif
 
     /*
      * unqueue, then remove page.  Note that we cannot destroy
@@ -1093,12 +1087,10 @@ vm_page_free_toq(vm_page_t m)
     vm_page_undirty(m);
 
     if (m->wire_count != 0) {
-#if !defined(MAX_PERF)
         if (m->wire_count > 1) {
             panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
                 m->wire_count, (long)m->pindex);
         }
-#endif
         panic("vm_page_free: freeing wired page\n");
     }
 
@@ -1224,9 +1216,7 @@ vm_page_unwire(m, activate)
             }
         }
     } else {
-#if !defined(MAX_PERF)
         panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
-#endif
     }
     splx(s);
 }
@@ -1288,12 +1278,10 @@ vm_page_cache(m)
 {
     int s;
 
-#if !defined(MAX_PERF)
     if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
         printf("vm_page_cache: attempting to cache busy page\n");
         return;
     }
-#endif
     if ((m->queue - m->pc) == PQ_CACHE)
         return;
 
@@ -1303,12 +1291,10 @@ vm_page_cache(m)
      */
     vm_page_protect(m, VM_PROT_NONE);
-#if !defined(MAX_PERF)
     if (m->dirty != 0) {
         panic("vm_page_cache: caching a dirty page, pindex: %ld",
             (long)m->pindex);
     }
-#endif
     s = splvm();
     vm_page_unqueue_nowakeup(m);
     m->queue = PQ_CACHE + m->pc;
@@ -1692,14 +1678,12 @@ contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
     vm_page_t pga = vm_page_array;
 
     size = round_page(size);
-#if !defined(MAX_PERF)
     if (size == 0)
         panic("contigmalloc1: size must not be 0");
     if ((alignment & (alignment - 1)) != 0)
         panic("contigmalloc1: alignment must be a power of 2");
     if ((boundary & (boundary - 1)) != 0)
         panic("contigmalloc1: boundary must be a power of 2");
-#endif
 
     start = 0;
     for (pass = 0; pass <= 1; pass++) {