author     peter <peter@FreeBSD.org>   1996-03-11 19:54:35 +0000
committer  peter <peter@FreeBSD.org>   1996-03-11 19:54:35 +0000
commit     f8b9f31b0ab6a6d7514f307d7a789fc9157a4a7a (patch)
tree       950d5dab28058beacf8c4cf5075f77b1c08bcd10 /sys
parent     e68f37c99a71bdc8513e41b7bd4bff8b8f99eed6 (diff)
download   FreeBSD-src-f8b9f31b0ab6a6d7514f307d7a789fc9157a4a7a.zip
           FreeBSD-src-f8b9f31b0ab6a6d7514f307d7a789fc9157a4a7a.tar.gz
Import 4.4BSD-Lite2 onto the vendor branch, note that in the kernel, all
files are off the vendor branch, so this should not change anything.

A "U" marker generally means that the file was not changed in between the
4.4Lite and Lite-2 releases, and does not need a merge.  "C" generally
means that there was a change.
Diffstat (limited to 'sys')
-rw-r--r--  sys/vm/kern_lock.c     849
-rw-r--r--  sys/vm/lock.h          254
-rw-r--r--  sys/vm/vm.h             12
-rw-r--r--  sys/vm/vm_extern.h      18
-rw-r--r--  sys/vm/vm_fault.c        8
-rw-r--r--  sys/vm/vm_glue.c        34
-rw-r--r--  sys/vm/vm_kern.c        28
-rw-r--r--  sys/vm/vm_map.c        104
-rw-r--r--  sys/vm/vm_map.h         40
-rw-r--r--  sys/vm/vm_meter.c       23
-rw-r--r--  sys/vm/vm_mmap.c       298
-rw-r--r--  sys/vm/vm_object.c      67
-rw-r--r--  sys/vm/vm_object.h       6
-rw-r--r--  sys/vm/vm_page.c        48
-rw-r--r--  sys/vm/vm_page.h         6
-rw-r--r--  sys/vm/vm_pageout.c     20
-rw-r--r--  sys/vm/vm_pageout.h      6
-rw-r--r--  sys/vm/vm_pager.c       24
-rw-r--r--  sys/vm/vm_pager.h       16
-rw-r--r--  sys/vm/vm_param.h       27
-rw-r--r--  sys/vm/vm_unix.c         6
-rw-r--r--  sys/vm/vnode_pager.c    34
22 files changed, 1034 insertions, 894 deletions
diff --git a/sys/vm/kern_lock.c b/sys/vm/kern_lock.c
index c4fa052..ddc9185 100644
--- a/sys/vm/kern_lock.c
+++ b/sys/vm/kern_lock.c
@@ -1,9 +1,10 @@
/*
- * Copyright (c) 1991, 1993
+ * Copyright (c) 1995
* The Regents of the University of California. All rights reserved.
*
- * This code is derived from software contributed to Berkeley by
- * The Mach Operating System project at Carnegie-Mellon University.
+ * This code contains ideas from software contributed to Berkeley by
+ * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
+ * System project at Carnegie-Mellon University.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,502 +34,498 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)kern_lock.c 8.1 (Berkeley) 6/11/93
- *
- *
- * Copyright (c) 1987, 1990 Carnegie-Mellon University.
- * All rights reserved.
- *
- * Authors: Avadis Tevanian, Jr., Michael Wayne Young
- *
- * Permission to use, copy, modify and distribute this software and
- * its documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
- * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie the
- * rights to redistribute these changes.
- */
-
-/*
- * Locking primitives implementation
+ * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
*/
#include <sys/param.h>
-#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <machine/cpu.h>
-#include <vm/vm.h>
+/*
+ * Locking primitives implementation.
+ * Locks provide shared/exclusive synchronization.
+ */
-/* XXX */
-#include <sys/proc.h>
-typedef int *thread_t;
-#define current_thread() ((thread_t)&curproc->p_thread)
-/* XXX */
+#ifdef DEBUG
+#define COUNT(p, x) if (p) (p)->p_locks += (x)
+#else
+#define COUNT(p, x)
+#endif
-#if NCPUS > 1
+#if NCPUS > 1
/*
- * Module: lock
- * Function:
- * Provide reader/writer sychronization.
- * Implementation:
- * Simple interlock on a bit. Readers first interlock
- * increment the reader count, then let go. Writers hold
- * the interlock (thus preventing further readers), and
- * wait for already-accepted readers to go away.
+ * For multiprocessor system, try spin lock first.
+ *
+ * This should be inline expanded below, but we cannot have #if
+ * inside a multiline define.
*/
+int lock_wait_time = 100;
+#define PAUSE(lkp, wanted) \
+ if (lock_wait_time > 0) { \
+ int i; \
+ \
+ simple_unlock(&lkp->lk_interlock); \
+ for (i = lock_wait_time; i > 0; i--) \
+ if (!(wanted)) \
+ break; \
+ simple_lock(&lkp->lk_interlock); \
+ } \
+ if (!(wanted)) \
+ break;
+
+#else /* NCPUS == 1 */
/*
- * The simple-lock routines are the primitives out of which
- * the lock package is built. The implementation is left
- * to the machine-dependent code.
+ * It is an error to spin on a uniprocessor as nothing will ever cause
+ * the simple lock to clear while we are executing.
*/
+#define PAUSE(lkp, wanted)
+
+#endif /* NCPUS == 1 */
-#ifdef notdef
/*
- * A sample implementation of simple locks.
- * assumes:
- * boolean_t test_and_set(boolean_t *)
- * indivisibly sets the boolean to TRUE
- * and returns its old value
- * and that setting a boolean to FALSE is indivisible.
+ * Acquire a resource.
*/
+#define ACQUIRE(lkp, error, extflags, wanted) \
+ PAUSE(lkp, wanted); \
+ for (error = 0; wanted; ) { \
+ (lkp)->lk_waitcount++; \
+ simple_unlock(&(lkp)->lk_interlock); \
+ error = tsleep((void *)lkp, (lkp)->lk_prio, \
+ (lkp)->lk_wmesg, (lkp)->lk_timo); \
+ simple_lock(&(lkp)->lk_interlock); \
+ (lkp)->lk_waitcount--; \
+ if (error) \
+ break; \
+ if ((extflags) & LK_SLEEPFAIL) { \
+ error = ENOLCK; \
+ break; \
+ } \
+ }
+
/*
- * simple_lock_init initializes a simple lock. A simple lock
- * may only be used for exclusive locks.
+ * Initialize a lock; required before use.
*/
-
-void simple_lock_init(l)
- simple_lock_t l;
+void
+lockinit(lkp, prio, wmesg, timo, flags)
+ struct lock *lkp;
+ int prio;
+ char *wmesg;
+ int timo;
+ int flags;
{
- *(boolean_t *)l = FALSE;
-}
-void simple_lock(l)
- simple_lock_t l;
-{
- while (test_and_set((boolean_t *)l))
- continue;
-}
-
-void simple_unlock(l)
- simple_lock_t l;
-{
- *(boolean_t *)l = FALSE;
-}
-
-boolean_t simple_lock_try(l)
- simple_lock_t l;
-{
- return (!test_and_set((boolean_t *)l));
+ bzero(lkp, sizeof(struct lock));
+ simple_lock_init(&lkp->lk_interlock);
+ lkp->lk_flags = flags & LK_EXTFLG_MASK;
+ lkp->lk_prio = prio;
+ lkp->lk_timo = timo;
+ lkp->lk_wmesg = wmesg;
+ lkp->lk_lockholder = LK_NOPROC;
}
-#endif /* notdef */
-#endif /* NCPUS > 1 */
-
-#if NCPUS > 1
-int lock_wait_time = 100;
-#else /* NCPUS > 1 */
-
- /*
- * It is silly to spin on a uni-processor as if we
- * thought something magical would happen to the
- * want_write bit while we are executing.
- */
-int lock_wait_time = 0;
-#endif /* NCPUS > 1 */
-
/*
- * Routine: lock_init
- * Function:
- * Initialize a lock; required before use.
- * Note that clients declare the "struct lock"
- * variables and then initialize them, rather
- * than getting a new one from this module.
+ * Determine the status of a lock.
*/
-void lock_init(l, can_sleep)
- lock_t l;
- boolean_t can_sleep;
+int
+lockstatus(lkp)
+ struct lock *lkp;
{
- bzero(l, sizeof(lock_data_t));
- simple_lock_init(&l->interlock);
- l->want_write = FALSE;
- l->want_upgrade = FALSE;
- l->read_count = 0;
- l->can_sleep = can_sleep;
- l->thread = (char *)-1; /* XXX */
- l->recursion_depth = 0;
+ int lock_type = 0;
+
+ simple_lock(&lkp->lk_interlock);
+ if (lkp->lk_exclusivecount != 0)
+ lock_type = LK_EXCLUSIVE;
+ else if (lkp->lk_sharecount != 0)
+ lock_type = LK_SHARED;
+ simple_unlock(&lkp->lk_interlock);
+ return (lock_type);
}
-void lock_sleepable(l, can_sleep)
- lock_t l;
- boolean_t can_sleep;
-{
- simple_lock(&l->interlock);
- l->can_sleep = can_sleep;
- simple_unlock(&l->interlock);
-}
-
-
/*
- * Sleep locks. These use the same data structure and algorithm
- * as the spin locks, but the process sleeps while it is waiting
- * for the lock. These work on uniprocessor systems.
+ * Set, change, or release a lock.
+ *
+ * Shared requests increment the shared count. Exclusive requests set the
+ * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
+ * accepted shared locks and shared-to-exclusive upgrades to go away.
*/
-
-void lock_write(l)
- register lock_t l;
+int
+lockmgr(lkp, flags, interlkp, p)
+ __volatile struct lock *lkp;
+ u_int flags;
+ struct simplelock *interlkp;
+ struct proc *p;
{
- register int i;
-
- simple_lock(&l->interlock);
-
- if (((thread_t)l->thread) == current_thread()) {
- /*
- * Recursive lock.
- */
- l->recursion_depth++;
- simple_unlock(&l->interlock);
- return;
- }
+ int error;
+ pid_t pid;
+ int extflags;
+ error = 0;
+ if (p)
+ pid = p->p_pid;
+ else
+ pid = LK_KERNPROC;
+ simple_lock(&lkp->lk_interlock);
+ if (flags & LK_INTERLOCK)
+ simple_unlock(interlkp);
+ extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
+#ifdef DIAGNOSTIC
/*
- * Try to acquire the want_write bit.
+ * Once a lock has drained, the LK_DRAINING flag is set and an
+ * exclusive lock is returned. The only valid operation thereafter
+ * is a single release of that exclusive lock. This final release
+ * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
+ * further requests of any sort will result in a panic. The bits
+ * selected for these two flags are chosen so that they will be set
+ * in memory that is freed (freed memory is filled with 0xdeadbeef).
+ * The final release is permitted to give a new lease on life to
+ * the lock by specifying LK_REENABLE.
*/
- while (l->want_write) {
- if ((i = lock_wait_time) > 0) {
- simple_unlock(&l->interlock);
- while (--i > 0 && l->want_write)
- continue;
- simple_lock(&l->interlock);
+ if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
+ if (lkp->lk_flags & LK_DRAINED)
+ panic("lockmgr: using decommissioned lock");
+ if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
+ lkp->lk_lockholder != pid)
+ panic("lockmgr: non-release on draining lock: %d\n",
+ flags & LK_TYPE_MASK);
+ lkp->lk_flags &= ~LK_DRAINING;
+ if ((flags & LK_REENABLE) == 0)
+ lkp->lk_flags |= LK_DRAINED;
+ }
+#endif DIAGNOSTIC
+
+ switch (flags & LK_TYPE_MASK) {
+
+ case LK_SHARED:
+ if (lkp->lk_lockholder != pid) {
+ /*
+ * If just polling, check to see if we will block.
+ */
+ if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
+ (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
+ error = EBUSY;
+ break;
+ }
+ /*
+ * Wait for exclusive locks and upgrades to clear.
+ */
+ ACQUIRE(lkp, error, extflags, lkp->lk_flags &
+ (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
+ if (error)
+ break;
+ lkp->lk_sharecount++;
+ COUNT(p, 1);
+ break;
}
-
- if (l->can_sleep && l->want_write) {
- l->waiting = TRUE;
- thread_sleep((int) l, &l->interlock, FALSE);
- simple_lock(&l->interlock);
+ /*
+ * We hold an exclusive lock, so downgrade it to shared.
+ * An alternative would be to fail with EDEADLK.
+ */
+ lkp->lk_sharecount++;
+ COUNT(p, 1);
+ /* fall into downgrade */
+
+ case LK_DOWNGRADE:
+ if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
+ panic("lockmgr: not holding exclusive lock");
+ lkp->lk_sharecount += lkp->lk_exclusivecount;
+ lkp->lk_exclusivecount = 0;
+ lkp->lk_flags &= ~LK_HAVE_EXCL;
+ lkp->lk_lockholder = LK_NOPROC;
+ if (lkp->lk_waitcount)
+ wakeup((void *)lkp);
+ break;
+
+ case LK_EXCLUPGRADE:
+ /*
+ * If another process is ahead of us to get an upgrade,
+ * then we want to fail rather than have an intervening
+ * exclusive access.
+ */
+ if (lkp->lk_flags & LK_WANT_UPGRADE) {
+ lkp->lk_sharecount--;
+ COUNT(p, -1);
+ error = EBUSY;
+ break;
}
- }
- l->want_write = TRUE;
-
- /* Wait for readers (and upgrades) to finish */
+ /* fall into normal upgrade */
- while ((l->read_count != 0) || l->want_upgrade) {
- if ((i = lock_wait_time) > 0) {
- simple_unlock(&l->interlock);
- while (--i > 0 && (l->read_count != 0 ||
- l->want_upgrade))
- continue;
- simple_lock(&l->interlock);
+ case LK_UPGRADE:
+ /*
+ * Upgrade a shared lock to an exclusive one. If another
+ * shared lock has already requested an upgrade to an
+ * exclusive lock, our shared lock is released and an
+ * exclusive lock is requested (which will be granted
+ * after the upgrade). If we return an error, the file
+ * will always be unlocked.
+ */
+ if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
+ panic("lockmgr: upgrade exclusive lock");
+ lkp->lk_sharecount--;
+ COUNT(p, -1);
+ /*
+ * If we are just polling, check to see if we will block.
+ */
+ if ((extflags & LK_NOWAIT) &&
+ ((lkp->lk_flags & LK_WANT_UPGRADE) ||
+ lkp->lk_sharecount > 1)) {
+ error = EBUSY;
+ break;
}
-
- if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
- l->waiting = TRUE;
- thread_sleep((int) l, &l->interlock, FALSE);
- simple_lock(&l->interlock);
+ if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
+ /*
+ * We are first shared lock to request an upgrade, so
+ * request upgrade and wait for the shared count to
+ * drop to zero, then take exclusive lock.
+ */
+ lkp->lk_flags |= LK_WANT_UPGRADE;
+ ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
+ lkp->lk_flags &= ~LK_WANT_UPGRADE;
+ if (error)
+ break;
+ lkp->lk_flags |= LK_HAVE_EXCL;
+ lkp->lk_lockholder = pid;
+ if (lkp->lk_exclusivecount != 0)
+ panic("lockmgr: non-zero exclusive count");
+ lkp->lk_exclusivecount = 1;
+ COUNT(p, 1);
+ break;
}
- }
- simple_unlock(&l->interlock);
-}
-
-void lock_done(l)
- register lock_t l;
-{
- simple_lock(&l->interlock);
-
- if (l->read_count != 0)
- l->read_count--;
- else
- if (l->recursion_depth != 0)
- l->recursion_depth--;
- else
- if (l->want_upgrade)
- l->want_upgrade = FALSE;
- else
- l->want_write = FALSE;
-
- if (l->waiting) {
- l->waiting = FALSE;
- thread_wakeup((int) l);
- }
- simple_unlock(&l->interlock);
-}
-
-void lock_read(l)
- register lock_t l;
-{
- register int i;
-
- simple_lock(&l->interlock);
-
- if (((thread_t)l->thread) == current_thread()) {
/*
- * Recursive lock.
+ * Someone else has requested upgrade. Release our shared
+ * lock, awaken upgrade requestor if we are the last shared
+ * lock, then request an exclusive lock.
*/
- l->read_count++;
- simple_unlock(&l->interlock);
- return;
- }
-
- while (l->want_write || l->want_upgrade) {
- if ((i = lock_wait_time) > 0) {
- simple_unlock(&l->interlock);
- while (--i > 0 && (l->want_write || l->want_upgrade))
- continue;
- simple_lock(&l->interlock);
+ if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
+ wakeup((void *)lkp);
+ /* fall into exclusive request */
+
+ case LK_EXCLUSIVE:
+ if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
+ /*
+ * Recursive lock.
+ */
+ if ((extflags & LK_CANRECURSE) == 0)
+ panic("lockmgr: locking against myself");
+ lkp->lk_exclusivecount++;
+ COUNT(p, 1);
+ break;
}
-
- if (l->can_sleep && (l->want_write || l->want_upgrade)) {
- l->waiting = TRUE;
- thread_sleep((int) l, &l->interlock, FALSE);
- simple_lock(&l->interlock);
+ /*
+ * If we are just polling, check to see if we will sleep.
+ */
+ if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
+ (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+ lkp->lk_sharecount != 0)) {
+ error = EBUSY;
+ break;
}
- }
-
- l->read_count++;
- simple_unlock(&l->interlock);
-}
-
-/*
- * Routine: lock_read_to_write
- * Function:
- * Improves a read-only lock to one with
- * write permission. If another reader has
- * already requested an upgrade to a write lock,
- * no lock is held upon return.
- *
- * Returns TRUE if the upgrade *failed*.
- */
-boolean_t lock_read_to_write(l)
- register lock_t l;
-{
- register int i;
-
- simple_lock(&l->interlock);
-
- l->read_count--;
-
- if (((thread_t)l->thread) == current_thread()) {
/*
- * Recursive lock.
+ * Try to acquire the want_exclusive flag.
*/
- l->recursion_depth++;
- simple_unlock(&l->interlock);
- return(FALSE);
- }
-
- if (l->want_upgrade) {
+ ACQUIRE(lkp, error, extflags, lkp->lk_flags &
+ (LK_HAVE_EXCL | LK_WANT_EXCL));
+ if (error)
+ break;
+ lkp->lk_flags |= LK_WANT_EXCL;
/*
- * Someone else has requested upgrade.
- * Since we've released a read lock, wake
- * him up.
+ * Wait for shared locks and upgrades to finish.
*/
- if (l->waiting) {
- l->waiting = FALSE;
- thread_wakeup((int) l);
+ ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
+ (lkp->lk_flags & LK_WANT_UPGRADE));
+ lkp->lk_flags &= ~LK_WANT_EXCL;
+ if (error)
+ break;
+ lkp->lk_flags |= LK_HAVE_EXCL;
+ lkp->lk_lockholder = pid;
+ if (lkp->lk_exclusivecount != 0)
+ panic("lockmgr: non-zero exclusive count");
+ lkp->lk_exclusivecount = 1;
+ COUNT(p, 1);
+ break;
+
+ case LK_RELEASE:
+ if (lkp->lk_exclusivecount != 0) {
+ if (pid != lkp->lk_lockholder)
+ panic("lockmgr: pid %d, not %s %d unlocking",
+ pid, "exclusive lock holder",
+ lkp->lk_lockholder);
+ lkp->lk_exclusivecount--;
+ COUNT(p, -1);
+ if (lkp->lk_exclusivecount == 0) {
+ lkp->lk_flags &= ~LK_HAVE_EXCL;
+ lkp->lk_lockholder = LK_NOPROC;
+ }
+ } else if (lkp->lk_sharecount != 0) {
+ lkp->lk_sharecount--;
+ COUNT(p, -1);
}
+ if (lkp->lk_waitcount)
+ wakeup((void *)lkp);
+ break;
- simple_unlock(&l->interlock);
- return (TRUE);
- }
-
- l->want_upgrade = TRUE;
-
- while (l->read_count != 0) {
- if ((i = lock_wait_time) > 0) {
- simple_unlock(&l->interlock);
- while (--i > 0 && l->read_count != 0)
- continue;
- simple_lock(&l->interlock);
+ case LK_DRAIN:
+ /*
+ * Check that we do not already hold the lock, as it can
+ * never drain if we do. Unfortunately, we have no way to
+ * check for holding a shared lock, but at least we can
+ * check for an exclusive one.
+ */
+ if (lkp->lk_lockholder == pid)
+ panic("lockmgr: draining against myself");
+ /*
+ * If we are just polling, check to see if we will sleep.
+ */
+ if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
+ (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+ lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
+ error = EBUSY;
+ break;
}
-
- if (l->can_sleep && l->read_count != 0) {
- l->waiting = TRUE;
- thread_sleep((int) l, &l->interlock, FALSE);
- simple_lock(&l->interlock);
+ PAUSE(lkp, ((lkp->lk_flags &
+ (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+ lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
+ for (error = 0; ((lkp->lk_flags &
+ (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+ lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
+ lkp->lk_flags |= LK_WAITDRAIN;
+ simple_unlock(&lkp->lk_interlock);
+ if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
+ lkp->lk_wmesg, lkp->lk_timo))
+ return (error);
+ if ((extflags) & LK_SLEEPFAIL)
+ return (ENOLCK);
+ simple_lock(&lkp->lk_interlock);
}
+ lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
+ lkp->lk_lockholder = pid;
+ lkp->lk_exclusivecount = 1;
+ COUNT(p, 1);
+ break;
+
+ default:
+ simple_unlock(&lkp->lk_interlock);
+ panic("lockmgr: unknown locktype request %d",
+ flags & LK_TYPE_MASK);
+ /* NOTREACHED */
}
-
- simple_unlock(&l->interlock);
- return (FALSE);
-}
-
-void lock_write_to_read(l)
- register lock_t l;
-{
- simple_lock(&l->interlock);
-
- l->read_count++;
- if (l->recursion_depth != 0)
- l->recursion_depth--;
- else
- if (l->want_upgrade)
- l->want_upgrade = FALSE;
- else
- l->want_write = FALSE;
-
- if (l->waiting) {
- l->waiting = FALSE;
- thread_wakeup((int) l);
+ if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
+ (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
+ lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
+ lkp->lk_flags &= ~LK_WAITDRAIN;
+ wakeup((void *)&lkp->lk_flags);
}
-
- simple_unlock(&l->interlock);
+ simple_unlock(&lkp->lk_interlock);
+ return (error);
}
-
/*
- * Routine: lock_try_write
- * Function:
- * Tries to get a write lock.
- *
- * Returns FALSE if the lock is not held on return.
+ * Print out information about state of a lock. Used by VOP_PRINT
+ * routines to display status about contained locks.
*/
-
-boolean_t lock_try_write(l)
- register lock_t l;
+lockmgr_printinfo(lkp)
+ struct lock *lkp;
{
- simple_lock(&l->interlock);
-
- if (((thread_t)l->thread) == current_thread()) {
- /*
- * Recursive lock
- */
- l->recursion_depth++;
- simple_unlock(&l->interlock);
- return(TRUE);
- }
-
- if (l->want_write || l->want_upgrade || l->read_count) {
- /*
- * Can't get lock.
- */
- simple_unlock(&l->interlock);
- return(FALSE);
- }
-
- /*
- * Have lock.
- */
-
- l->want_write = TRUE;
- simple_unlock(&l->interlock);
- return(TRUE);
+ if (lkp->lk_sharecount)
+ printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
+ lkp->lk_sharecount);
+ else if (lkp->lk_flags & LK_HAVE_EXCL)
+ printf(" lock type %s: EXCL (count %d) by pid %d",
+ lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
+ if (lkp->lk_waitcount > 0)
+ printf(" with %d pending", lkp->lk_waitcount);
}
+#if defined(DEBUG) && NCPUS == 1
+#include <sys/kernel.h>
+#include <vm/vm.h>
+#include <sys/sysctl.h>
+int lockpausetime = 0;
+struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
+int simplelockrecurse;
/*
- * Routine: lock_try_read
- * Function:
- * Tries to get a read lock.
- *
- * Returns FALSE if the lock is not held on return.
+ * Simple lock functions so that the debugger can see from whence
+ * they are being called.
*/
-
-boolean_t lock_try_read(l)
- register lock_t l;
+void
+simple_lock_init(alp)
+ struct simplelock *alp;
{
- simple_lock(&l->interlock);
-
- if (((thread_t)l->thread) == current_thread()) {
- /*
- * Recursive lock
- */
- l->read_count++;
- simple_unlock(&l->interlock);
- return(TRUE);
- }
- if (l->want_write || l->want_upgrade) {
- simple_unlock(&l->interlock);
- return(FALSE);
- }
-
- l->read_count++;
- simple_unlock(&l->interlock);
- return(TRUE);
+ alp->lock_data = 0;
}
-/*
- * Routine: lock_try_read_to_write
- * Function:
- * Improves a read-only lock to one with
- * write permission. If another reader has
- * already requested an upgrade to a write lock,
- * the read lock is still held upon return.
- *
- * Returns FALSE if the upgrade *failed*.
- */
-boolean_t lock_try_read_to_write(l)
- register lock_t l;
+void
+_simple_lock(alp, id, l)
+ __volatile struct simplelock *alp;
+ const char *id;
+ int l;
{
- simple_lock(&l->interlock);
-
- if (((thread_t)l->thread) == current_thread()) {
- /*
- * Recursive lock
- */
- l->read_count--;
- l->recursion_depth++;
- simple_unlock(&l->interlock);
- return(TRUE);
- }
-
- if (l->want_upgrade) {
- simple_unlock(&l->interlock);
- return(FALSE);
- }
- l->want_upgrade = TRUE;
- l->read_count--;
-
- while (l->read_count != 0) {
- l->waiting = TRUE;
- thread_sleep((int) l, &l->interlock, FALSE);
- simple_lock(&l->interlock);
+ if (simplelockrecurse)
+ return;
+ if (alp->lock_data == 1) {
+ if (lockpausetime == -1)
+ panic("%s:%d: simple_lock: lock held", id, l);
+ printf("%s:%d: simple_lock: lock held\n", id, l);
+ if (lockpausetime == 1) {
+ BACKTRACE(curproc);
+ } else if (lockpausetime > 1) {
+ printf("%s:%d: simple_lock: lock held...", id, l);
+ tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
+ lockpausetime * hz);
+ printf(" continuing\n");
+ }
}
-
- simple_unlock(&l->interlock);
- return(TRUE);
+ alp->lock_data = 1;
+ if (curproc)
+ curproc->p_simple_locks++;
}
-/*
- * Allow a process that has a lock for write to acquire it
- * recursively (for read, write, or update).
- */
-void lock_set_recursive(l)
- lock_t l;
+int
+_simple_lock_try(alp, id, l)
+ __volatile struct simplelock *alp;
+ const char *id;
+ int l;
{
- simple_lock(&l->interlock);
- if (!l->want_write) {
- panic("lock_set_recursive: don't have write lock");
- }
- l->thread = (char *) current_thread();
- simple_unlock(&l->interlock);
+
+ if (alp->lock_data)
+ return (0);
+ if (simplelockrecurse)
+ return (1);
+ alp->lock_data = 1;
+ if (curproc)
+ curproc->p_simple_locks++;
+ return (1);
}
-/*
- * Prevent a lock from being re-acquired.
- */
-void lock_clear_recursive(l)
- lock_t l;
+void
+_simple_unlock(alp, id, l)
+ __volatile struct simplelock *alp;
+ const char *id;
+ int l;
{
- simple_lock(&l->interlock);
- if (((thread_t) l->thread) != current_thread()) {
- panic("lock_clear_recursive: wrong thread");
+
+ if (simplelockrecurse)
+ return;
+ if (alp->lock_data == 0) {
+ if (lockpausetime == -1)
+ panic("%s:%d: simple_unlock: lock not held", id, l);
+ printf("%s:%d: simple_unlock: lock not held\n", id, l);
+ if (lockpausetime == 1) {
+ BACKTRACE(curproc);
+ } else if (lockpausetime > 1) {
+ printf("%s:%d: simple_unlock: lock not held...", id, l);
+ tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
+ lockpausetime * hz);
+ printf(" continuing\n");
+ }
}
- if (l->recursion_depth == 0)
- l->thread = (char *)-1; /* XXX */
- simple_unlock(&l->interlock);
+ alp->lock_data = 0;
+ if (curproc)
+ curproc->p_simple_locks--;
}
+#endif /* DEBUG && NCPUS == 1 */
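The new kern_lock.c replaces the old MACH-derived lock_read()/lock_write() primitives with a single lockmgr() entry point driven by request flags. As a rough sketch (not part of the commit itself), a Lite2-era caller would drive the interface along these lines; lockinit() and lockmgr() come from the file above, while example_lock, example_init() and example_read_path() are hypothetical names invented for illustration:

/* Illustrative only -- not part of this commit. */
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

struct lock example_lock;		/* hypothetical lock */

void
example_init()
{

	/* Sleep at PVM priority, no timeout, no permanent external flags. */
	lockinit(&example_lock, PVM, "explck", 0, 0);
}

int
example_read_path(p)
	struct proc *p;
{
	int error;

	/* Take a shared lock; this sleeps unless LK_NOWAIT is passed. */
	error = lockmgr(&example_lock, LK_SHARED, (struct simplelock *)0, p);
	if (error)
		return (error);
	/* ... read-only work on whatever the lock protects ... */
	return (lockmgr(&example_lock, LK_RELEASE, (struct simplelock *)0, p));
}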
diff --git a/sys/vm/lock.h b/sys/vm/lock.h
index 26bed1f..6adf53b 100644
--- a/sys/vm/lock.h
+++ b/sys/vm/lock.h
@@ -1,9 +1,10 @@
/*
- * Copyright (c) 1991, 1993
+ * Copyright (c) 1995
* The Regents of the University of California. All rights reserved.
*
- * This code is derived from software contributed to Berkeley by
- * The Mach Operating System project at Carnegie-Mellon University.
+ * This code contains ideas from software contributed to Berkeley by
+ * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
+ * System project at Carnegie-Mellon University.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,140 +34,147 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)lock.h 8.1 (Berkeley) 6/11/93
- *
- *
- * Copyright (c) 1987, 1990 Carnegie-Mellon University.
- * All rights reserved.
- *
- * Authors: Avadis Tevanian, Jr., Michael Wayne Young
- *
- * Permission to use, copy, modify and distribute this software and
- * its documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
- * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie the
- * rights to redistribute these changes.
- */
-
-/*
- * Locking primitives definitions
+ * @(#)lock.h 8.12 (Berkeley) 5/19/95
*/
#ifndef _LOCK_H_
#define _LOCK_H_
-#define NCPUS 1 /* XXX */
-
/*
- * A simple spin lock.
+ * The general lock structure. Provides for multiple shared locks,
+ * upgrading from shared to exclusive, and sleeping until the lock
+ * can be gained. The simple locks are defined in <machine/param.h>.
*/
-
-struct slock {
- int lock_data; /* in general 1 bit is sufficient */
+struct lock {
+ struct simplelock lk_interlock; /* lock on remaining fields */
+ u_int lk_flags; /* see below */
+ int lk_sharecount; /* # of accepted shared locks */
+ int lk_waitcount; /* # of processes sleeping for lock */
+ short lk_exclusivecount; /* # of recursive exclusive locks */
+ short lk_prio; /* priority at which to sleep */
+ char *lk_wmesg; /* resource sleeping (for tsleep) */
+ int lk_timo; /* maximum sleep time (for tsleep) */
+ pid_t lk_lockholder; /* pid of exclusive lock holder */
};
-
-typedef struct slock simple_lock_data_t;
-typedef struct slock *simple_lock_t;
-
/*
- * The general lock structure. Provides for multiple readers,
- * upgrading from read to write, and sleeping until the lock
- * can be gained.
+ * Lock request types:
+ * LK_SHARED - get one of many possible shared locks. If a process
+ * holding an exclusive lock requests a shared lock, the exclusive
+ * lock(s) will be downgraded to shared locks.
+ * LK_EXCLUSIVE - stop further shared locks, when they are cleared,
+ * grant a pending upgrade if it exists, then grant an exclusive
+ * lock. Only one exclusive lock may exist at a time, except that
+ * a process holding an exclusive lock may get additional exclusive
+ * locks if it explicitly sets the LK_CANRECURSE flag in the lock
+ * request, or if the LK_CANRECURSE flag was set when the lock was
+ * initialized.
+ * LK_UPGRADE - the process must hold a shared lock that it wants to
+ * have upgraded to an exclusive lock. Other processes may get
+ * exclusive access to the resource between the time that the upgrade
+ * is requested and the time that it is granted.
+ * LK_EXCLUPGRADE - the process must hold a shared lock that it wants to
+ * have upgraded to an exclusive lock. If the request succeeds, no
+ * other processes will have gotten exclusive access to the resource
+ * between the time that the upgrade is requested and the time that
+ * it is granted. However, if another process has already requested
+ * an upgrade, the request will fail (see error returns below).
+ * LK_DOWNGRADE - the process must hold an exclusive lock that it wants
+ * to have downgraded to a shared lock. If the process holds multiple
+ * (recursive) exclusive locks, they will all be downgraded to shared
+ * locks.
+ * LK_RELEASE - release one instance of a lock.
+ * LK_DRAIN - wait for all activity on the lock to end, then mark it
+ * decommissioned. This feature is used before freeing a lock that
+ * is part of a piece of memory that is about to be freed.
+ *
+ * These are flags that are passed to the lockmgr routine.
*/
+#define LK_TYPE_MASK 0x0000000f /* type of lock sought */
+#define LK_SHARED 0x00000001 /* shared lock */
+#define LK_EXCLUSIVE 0x00000002 /* exclusive lock */
+#define LK_UPGRADE 0x00000003 /* shared-to-exclusive upgrade */
+#define LK_EXCLUPGRADE 0x00000004 /* first shared-to-exclusive upgrade */
+#define LK_DOWNGRADE 0x00000005 /* exclusive-to-shared downgrade */
+#define LK_RELEASE 0x00000006 /* release any type of lock */
+#define LK_DRAIN 0x00000007 /* wait for all lock activity to end */
+/*
+ * External lock flags.
+ *
+ * The first three flags may be set in lock_init to set their mode permanently,
+ * or passed in as arguments to the lock manager. The LK_REENABLE flag may be
+ * set only at the release of a lock obtained by drain.
+ */
+#define LK_EXTFLG_MASK 0x00000070 /* mask of external flags */
+#define LK_NOWAIT 0x00000010 /* do not sleep to await lock */
+#define LK_SLEEPFAIL 0x00000020 /* sleep, then return failure */
+#define LK_CANRECURSE 0x00000040 /* allow recursive exclusive lock */
+#define LK_REENABLE 0x00000080 /* lock is to be reenabled after drain */
+/*
+ * Internal lock flags.
+ *
+ * These flags are used internally to the lock manager.
+ */
+#define LK_WANT_UPGRADE 0x00000100 /* waiting for share-to-excl upgrade */
+#define LK_WANT_EXCL 0x00000200 /* exclusive lock sought */
+#define LK_HAVE_EXCL 0x00000400 /* exclusive lock obtained */
+#define LK_WAITDRAIN 0x00000800 /* process waiting for lock to drain */
+#define LK_DRAINING 0x00004000 /* lock is being drained */
+#define LK_DRAINED 0x00008000 /* lock has been decommissioned */
+/*
+ * Control flags
+ *
+ * Non-persistent external flags.
+ */
+#define LK_INTERLOCK 0x00010000 /* unlock passed simple lock after
+ getting lk_interlock */
+#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
-struct lock {
-#ifdef vax
- /*
- * Efficient VAX implementation -- see field description below.
- */
- unsigned int read_count:16,
- want_upgrade:1,
- want_write:1,
- waiting:1,
- can_sleep:1,
- :0;
-
- simple_lock_data_t interlock;
-#else /* vax */
-#ifdef ns32000
- /*
- * Efficient ns32000 implementation --
- * see field description below.
- */
- simple_lock_data_t interlock;
- unsigned int read_count:16,
- want_upgrade:1,
- want_write:1,
- waiting:1,
- can_sleep:1,
- :0;
-
-#else /* ns32000 */
- /* Only the "interlock" field is used for hardware exclusion;
- * other fields are modified with normal instructions after
- * acquiring the interlock bit.
- */
- simple_lock_data_t
- interlock; /* Interlock for remaining fields */
- boolean_t want_write; /* Writer is waiting, or locked for write */
- boolean_t want_upgrade; /* Read-to-write upgrade waiting */
- boolean_t waiting; /* Someone is sleeping on lock */
- boolean_t can_sleep; /* Can attempts to lock go to sleep */
- int read_count; /* Number of accepted readers */
-#endif /* ns32000 */
-#endif /* vax */
- char *thread; /* Thread that has lock, if recursive locking allowed */
- /* (should be thread_t, but but we then have mutually
- recursive definitions) */
- int recursion_depth;/* Depth of recursion */
-};
+/*
+ * Lock return status.
+ *
+ * Successfully obtained locks return 0. Locks will always succeed
+ * unless one of the following is true:
+ * LK_FORCEUPGRADE is requested and some other process has already
+ * requested a lock upgrade (returns EBUSY).
+ * LK_WAIT is set and a sleep would be required (returns EBUSY).
+ * LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK).
+ * PCATCH is set in lock priority and a signal arrives (returns
+ * either EINTR or ERESTART if the system call is to be restarted).
+ * Non-null lock timeout and timeout expires (returns EWOULDBLOCK).
+ * A failed lock attempt always returns a non-zero error value. No lock
+ * is held after an error return (in particular, a failed LK_UPGRADE
+ * or LK_FORCEUPGRADE will have released its shared access lock).
+ */
-typedef struct lock lock_data_t;
-typedef struct lock *lock_t;
+/*
+ * Indicator that no process holds exclusive lock
+ */
+#define LK_KERNPROC ((pid_t) -2)
+#define LK_NOPROC ((pid_t) -1)
-#if NCPUS > 1
-__BEGIN_DECLS
-void simple_lock __P((simple_lock_t));
-void simple_lock_init __P((simple_lock_t));
-boolean_t simple_lock_try __P((simple_lock_t));
-void simple_unlock __P((simple_lock_t));
-__END_DECLS
-#else /* No multiprocessor locking is necessary. */
-#define simple_lock(l)
-#define simple_lock_init(l)
-#define simple_lock_try(l) (1) /* Always succeeds. */
-#define simple_unlock(l)
-#endif
+struct proc;
-/* Sleep locks must work even if no multiprocessing. */
+void lockinit __P((struct lock *, int prio, char *wmesg, int timo,
+ int flags));
+int lockmgr __P((__volatile struct lock *, u_int flags,
+ struct simplelock *, struct proc *p));
+int lockstatus __P((struct lock *));
-#define lock_read_done(l) lock_done(l)
-#define lock_write_done(l) lock_done(l)
+#ifdef DEBUG
+void _simple_unlock __P((__volatile struct simplelock *alp, const char *, int));
+#define simple_unlock(alp) _simple_unlock(alp, __FILE__, __LINE__)
+int _simple_lock_try __P((__volatile struct simplelock *alp, const char *, int));
+#define simple_lock_try(alp) _simple_lock_try(alp, __FILE__, __LINE__)
+void _simple_lock __P((__volatile struct simplelock *alp, const char *, int));
+#define simple_lock(alp) _simple_lock(alp, __FILE__, __LINE__)
+void simple_lock_init __P((struct simplelock *alp));
+#else /* !DEBUG */
+#if NCPUS == 1 /* no multiprocessor locking is necessary */
+#define simple_lock_init(alp)
+#define simple_lock(alp)
+#define simple_lock_try(alp) (1) /* always succeeds */
+#define simple_unlock(alp)
+#endif /* NCPUS == 1 */
+#endif /* !DEBUG */
-void lock_clear_recursive __P((lock_t));
-void lock_done __P((lock_t));
-void lock_init __P((lock_t, boolean_t));
-void lock_read __P((lock_t));
-boolean_t lock_read_to_write __P((lock_t));
-void lock_set_recursive __P((lock_t));
-void lock_sleepable __P((lock_t, boolean_t));
-boolean_t lock_try_read __P((lock_t));
-boolean_t lock_try_read_to_write __P((lock_t));
-boolean_t lock_try_write __P((lock_t));
-void lock_write __P((lock_t));
-void lock_write_to_read __P((lock_t));
#endif /* !_LOCK_H_ */
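The request-type comments above draw a distinction worth noting: LK_UPGRADE may allow another process to obtain exclusive access while the upgrade is pending, whereas LK_EXCLUPGRADE fails with EBUSY if some other shared holder has already asked for an upgrade, and (per the kern_lock.c code earlier in this diff) the failing caller's shared lock is released in the process. A minimal hypothetical sketch of combining the two, assuming the includes from the previous sketch plus <sys/errno.h> for EBUSY, and a lock currently held shared by p:

/* Illustrative only -- upgrade_or_queue() is a hypothetical helper. */
int
upgrade_or_queue(lkp, p)
	struct lock *lkp;
	struct proc *p;
{
	int error;

	error = lockmgr(lkp, LK_EXCLUPGRADE, (struct simplelock *)0, p);
	if (error == EBUSY) {
		/*
		 * Another process already requested the upgrade; our
		 * shared lock has been released, so queue up behind it
		 * for a plain exclusive lock instead.
		 */
		error = lockmgr(lkp, LK_EXCLUSIVE, (struct simplelock *)0, p);
	}
	return (error);
}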
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 85f892f..25a2599 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm.h 8.2 (Berkeley) 12/13/93
+ * @(#)vm.h 8.5 (Berkeley) 5/11/95
*/
#ifndef VM_H
@@ -56,10 +56,18 @@ typedef struct vm_page *vm_page_t;
struct pager_struct;
typedef struct pager_struct *vm_pager_t;
+/*
+ * MACH VM locking type mappings to kernel types
+ */
+typedef struct simplelock simple_lock_data_t;
+typedef struct simplelock *simple_lock_t;
+typedef struct lock lock_data_t;
+typedef struct lock *lock_t;
+
#include <sys/vmmeter.h>
#include <sys/queue.h>
#include <vm/vm_param.h>
-#include <vm/lock.h>
+#include <sys/lock.h>
#include <vm/vm_prot.h>
#include <vm/vm_inherit.h>
#include <vm/vm_map.h>
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index bae5f00..e38f596 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
+ * @(#)vm_extern.h 8.5 (Berkeley) 5/3/95
*/
struct buf;
@@ -47,7 +47,7 @@ void chgkprot __P((caddr_t, int, int));
#ifdef KERNEL
#ifdef TYPEDEF_FOR_UAP
-int getpagesize __P((struct proc *p, void *, int *));
+int compat_43_getpagesize __P((struct proc *p, void *, int *));
int madvise __P((struct proc *, void *, int *));
int mincore __P((struct proc *, void *, int *));
int mprotect __P((struct proc *, void *, int *));
@@ -59,12 +59,10 @@ int smmap __P((struct proc *, void *, int *));
int sstk __P((struct proc *, void *, int *));
#endif
-void assert_wait __P((int, boolean_t));
-int grow __P((struct proc *, u_int));
+void assert_wait __P((void *, boolean_t));
+int grow __P((struct proc *, vm_offset_t));
void iprintf __P((const char *, ...));
int kernacc __P((caddr_t, int, int));
-int kinfo_loadavg __P((int, char *, int *, int, int *));
-int kinfo_meter __P((int, caddr_t, int *, int, int *));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_wait __P((vm_map_t, vm_size_t));
@@ -75,7 +73,7 @@ vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *,
vm_size_t, boolean_t));
void loadav __P((struct loadavg *));
-void munmapfd __P((int));
+void munmapfd __P((struct proc *, int));
int pager_cache __P((vm_object_t, boolean_t));
void sched __P((void));
int svm_allocate __P((struct proc *, void *, int *));
@@ -89,8 +87,8 @@ void swapout_threads __P((void));
int swfree __P((struct proc *, int));
void swstrategy __P((struct buf *));
void thread_block __P((void));
-void thread_sleep __P((int, simple_lock_t, boolean_t));
-void thread_wakeup __P((int));
+void thread_sleep __P((void *, simple_lock_t, boolean_t));
+void thread_wakeup __P((void *));
int useracc __P((caddr_t, int, int));
int vm_allocate __P((vm_map_t,
vm_offset_t *, vm_size_t, boolean_t));
@@ -112,6 +110,8 @@ int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t,
int vm_protect __P((vm_map_t,
vm_offset_t, vm_size_t, boolean_t, vm_prot_t));
void vm_set_page_size __P((void));
+int vm_sysctl __P((int *, u_int, void *, size_t *, void *,
+ size_t, struct proc *));
void vmmeter __P((void));
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_fork __P((struct vmspace *));
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index f60abf2..16b1b9a 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_fault.c 8.4 (Berkeley) 1/12/94
+ * @(#)vm_fault.c 8.5 (Berkeley) 1/9/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -893,7 +893,8 @@ vm_fault_wire(map, start, end)
*
* Unwire a range of virtual addresses in a map.
*/
-void vm_fault_unwire(map, start, end)
+void
+vm_fault_unwire(map, start, end)
vm_map_t map;
vm_offset_t start, end;
{
@@ -942,7 +943,8 @@ void vm_fault_unwire(map, start, end)
* entry corresponding to a main map entry that is wired down).
*/
-void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
+void
+vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
vm_map_t dst_map;
vm_map_t src_map;
vm_map_entry_t dst_entry;
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 5676ff3..6db538c 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_glue.c 8.6 (Berkeley) 1/5/94
+ * @(#)vm_glue.c 8.9 (Berkeley) 3/4/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -318,12 +318,13 @@ scheduler()
loop:
#ifdef DEBUG
while (!enableswap)
- sleep((caddr_t)&proc0, PVM);
+ tsleep((caddr_t)&proc0, PVM, "noswap", 0);
#endif
pp = NULL;
ppri = INT_MIN;
- for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
+ for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
+ /* XXX should also penalize based on vm_swrss */
pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
if (pri > ppri) {
pp = p;
@@ -333,13 +334,13 @@ loop:
}
#ifdef DEBUG
if (swapdebug & SDB_FOLLOW)
- printf("sched: running, procp %x pri %d\n", pp, ppri);
+ printf("scheduler: running, procp %x pri %d\n", pp, ppri);
#endif
/*
* Nothing to do, back to sleep
*/
if ((p = pp) == NULL) {
- sleep((caddr_t)&proc0, PVM);
+ tsleep((caddr_t)&proc0, PVM, "scheduler", 0);
goto loop;
}
@@ -347,6 +348,7 @@ loop:
* We would like to bring someone in.
* This part is really bogus cuz we could deadlock on memory
* despite our feeble check.
+ * XXX should require at least vm_swrss / 2
*/
size = round_page(ctob(UPAGES));
addr = (vm_offset_t) p->p_addr;
@@ -378,7 +380,7 @@ loop:
*/
#ifdef DEBUG
if (swapdebug & SDB_FOLLOW)
- printf("sched: no room for pid %d(%s), free %d\n",
+ printf("scheduler: no room for pid %d(%s), free %d\n",
p->p_pid, p->p_comm, cnt.v_free_count);
#endif
(void) splhigh();
@@ -386,7 +388,7 @@ loop:
(void) spl0();
#ifdef DEBUG
if (swapdebug & SDB_FOLLOW)
- printf("sched: room again, free %d\n", cnt.v_free_count);
+ printf("scheduler: room again, free %d\n", cnt.v_free_count);
#endif
goto loop;
}
@@ -418,7 +420,7 @@ swapout_threads()
#endif
outp = outp2 = NULL;
outpri = outpri2 = 0;
- for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
+ for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (!swappable(p))
continue;
switch (p->p_stat) {
@@ -527,7 +529,7 @@ swapout(p)
void
assert_wait(event, ruptible)
- int event;
+ void *event;
boolean_t ruptible;
{
#ifdef lint
@@ -542,35 +544,35 @@ thread_block()
int s = splhigh();
if (curproc->p_thread)
- sleep((caddr_t)curproc->p_thread, PVM);
+ tsleep(curproc->p_thread, PVM, "thrd_block", 0);
splx(s);
}
void
thread_sleep(event, lock, ruptible)
- int event;
+ void *event;
simple_lock_t lock;
boolean_t ruptible;
{
+ int s = splhigh();
+
#ifdef lint
ruptible++;
#endif
- int s = splhigh();
-
curproc->p_thread = event;
simple_unlock(lock);
if (curproc->p_thread)
- sleep((caddr_t)event, PVM);
+ tsleep(event, PVM, "thrd_sleep", 0);
splx(s);
}
void
thread_wakeup(event)
- int event;
+ void *event;
{
int s = splhigh();
- wakeup((caddr_t)event);
+ wakeup(event);
splx(s);
}
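A recurring pattern in the vm_glue.c hunks is the conversion of the old sleep() calls, and of the MACH-style thread_sleep() shim, to tsleep(), which takes a wait channel, a priority, a wait-message string shown by ps(1), and a timeout in ticks. A rough before/after sketch of that idiom; example_event and both function names are hypothetical, not from the commit:

/* Illustrative only -- not part of this commit. */
#include <sys/param.h>
#include <sys/systm.h>

static int example_event;		/* hypothetical wait channel */

void
wait_old_style()
{

	/* Pre-Lite2 idiom: no wait message, no timeout argument. */
	sleep((caddr_t)&example_event, PVM);
}

void
wait_new_style()
{

	/*
	 * Lite2 idiom used throughout this commit: tsleep() names the
	 * wait channel, a priority, a short wmesg string, and a timeout
	 * (0 meaning sleep until wakeup()).
	 */
	tsleep(&example_event, PVM, "exwait", 0);
}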
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 7e4db63..fbbc396 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_kern.c 8.3 (Berkeley) 1/12/94
+ * @(#)vm_kern.c 8.4 (Berkeley) 1/9/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -80,8 +80,8 @@
* Allocate pageable memory to the kernel's address map.
* map must be "kernel_map" below.
*/
-
-vm_offset_t kmem_alloc_pageable(map, size)
+vm_offset_t
+kmem_alloc_pageable(map, size)
vm_map_t map;
register vm_size_t size;
{
@@ -109,7 +109,8 @@ vm_offset_t kmem_alloc_pageable(map, size)
* Allocate wired-down memory in the kernel's address map
* or a submap.
*/
-vm_offset_t kmem_alloc(map, size)
+vm_offset_t
+kmem_alloc(map, size)
register vm_map_t map;
register vm_size_t size;
{
@@ -201,7 +202,8 @@ vm_offset_t kmem_alloc(map, size)
* with kmem_alloc, and return the physical pages
* associated with that region.
*/
-void kmem_free(map, addr, size)
+void
+kmem_free(map, addr, size)
vm_map_t map;
register vm_offset_t addr;
vm_size_t size;
@@ -222,7 +224,8 @@ void kmem_free(map, addr, size)
* min, max Returned endpoints of map
* pageable Can the region be paged
*/
-vm_map_t kmem_suballoc(parent, min, max, size, pageable)
+vm_map_t
+kmem_suballoc(parent, min, max, size, pageable)
register vm_map_t parent;
vm_offset_t *min, *max;
register vm_size_t size;
@@ -381,7 +384,8 @@ kmem_malloc(map, size, canwait)
* has no room, the caller sleeps waiting for more memory in the submap.
*
*/
-vm_offset_t kmem_alloc_wait(map, size)
+vm_offset_t
+kmem_alloc_wait(map, size)
vm_map_t map;
vm_size_t size;
{
@@ -402,7 +406,7 @@ vm_offset_t kmem_alloc_wait(map, size)
vm_map_unlock(map);
return (0);
}
- assert_wait((int)map, TRUE);
+ assert_wait(map, TRUE);
vm_map_unlock(map);
thread_block();
}
@@ -417,14 +421,15 @@ vm_offset_t kmem_alloc_wait(map, size)
* Returns memory to a submap of the kernel, and wakes up any threads
* waiting for memory in that map.
*/
-void kmem_free_wakeup(map, addr, size)
+void
+kmem_free_wakeup(map, addr, size)
vm_map_t map;
vm_offset_t addr;
vm_size_t size;
{
vm_map_lock(map);
(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
- thread_wakeup((int)map);
+ thread_wakeup(map);
vm_map_unlock(map);
}
@@ -434,7 +439,8 @@ void kmem_free_wakeup(map, addr, size)
* map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
* the range between `start' and `end' as free.
*/
-void kmem_init(start, end)
+void
+kmem_init(start, end)
vm_offset_t start, end;
{
register vm_map_t m;
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 425fe0d..445ada6 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_map.c 8.3 (Berkeley) 1/12/94
+ * @(#)vm_map.c 8.9 (Berkeley) 5/17/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -72,7 +72,6 @@
#include <vm/vm.h>
#include <vm/vm_page.h>
-#include <vm/vm_object.h>
/*
* Virtual memory maps provide for the mapping, protection,
@@ -140,7 +139,8 @@ vm_map_t kmap_free;
static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
-void vm_map_startup()
+void
+vm_map_startup()
{
register int i;
register vm_map_entry_t mep;
@@ -218,7 +218,8 @@ vmspace_free(vm)
* the given physical map structure, and having
* the given lower and upper address bounds.
*/
-vm_map_t vm_map_create(pmap, min, max, pageable)
+vm_map_t
+vm_map_create(pmap, min, max, pageable)
pmap_t pmap;
vm_offset_t min, max;
boolean_t pageable;
@@ -228,9 +229,9 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
if (kmem_map == NULL) {
result = kmap_free;
- kmap_free = (vm_map_t) result->header.next;
if (result == NULL)
panic("vm_map_create: out of maps");
+ kmap_free = (vm_map_t) result->header.next;
} else
MALLOC(result, vm_map_t, sizeof(struct vm_map),
M_VMMAP, M_WAITOK);
@@ -262,7 +263,7 @@ vm_map_init(map, min, max, pageable)
map->first_free = &map->header;
map->hint = &map->header;
map->timestamp = 0;
- lock_init(&map->lock, TRUE);
+ lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
simple_lock_init(&map->ref_lock);
simple_lock_init(&map->hint_lock);
}
@@ -273,7 +274,8 @@ vm_map_init(map, min, max, pageable)
* Allocates a VM map entry for insertion.
* No entry fields are filled in. This routine is
*/
-vm_map_entry_t vm_map_entry_create(map)
+vm_map_entry_t
+vm_map_entry_create(map)
vm_map_t map;
{
vm_map_entry_t entry;
@@ -305,7 +307,8 @@ vm_map_entry_t vm_map_entry_create(map)
*
* Inverse of vm_map_entry_create.
*/
-void vm_map_entry_dispose(map, entry)
+void
+vm_map_entry_dispose(map, entry)
vm_map_t map;
vm_map_entry_t entry;
{
@@ -353,13 +356,18 @@ void vm_map_entry_dispose(map, entry)
* Creates another valid reference to the given map.
*
*/
-void vm_map_reference(map)
+void
+vm_map_reference(map)
register vm_map_t map;
{
if (map == NULL)
return;
simple_lock(&map->ref_lock);
+#ifdef DEBUG
+ if (map->ref_count == 0)
+ panic("vm_map_reference: zero ref_count");
+#endif
map->ref_count++;
simple_unlock(&map->ref_lock);
}
@@ -371,19 +379,17 @@ void vm_map_reference(map)
* destroying it if no references remain.
* The map should not be locked.
*/
-void vm_map_deallocate(map)
+void
+vm_map_deallocate(map)
register vm_map_t map;
{
- register int c;
if (map == NULL)
return;
simple_lock(&map->ref_lock);
- c = --map->ref_count;
- simple_unlock(&map->ref_lock);
-
- if (c > 0) {
+ if (--map->ref_count > 0) {
+ simple_unlock(&map->ref_lock);
return;
}
@@ -392,12 +398,14 @@ void vm_map_deallocate(map)
* to it.
*/
- vm_map_lock(map);
+ vm_map_lock_drain_interlock(map);
(void) vm_map_delete(map, map->min_offset, map->max_offset);
pmap_destroy(map->pmap);
+ vm_map_unlock(map);
+
FREE(map, M_VMMAP);
}
@@ -546,7 +554,8 @@ vm_map_insert(map, object, offset, start, end)
* result indicates whether the address is
* actually contained in the map.
*/
-boolean_t vm_map_lookup_entry(map, address, entry)
+boolean_t
+vm_map_lookup_entry(map, address, entry)
register vm_map_t map;
register vm_offset_t address;
vm_map_entry_t *entry; /* OUT */
@@ -714,7 +723,8 @@ vm_map_find(map, object, offset, addr, length, find_space)
* removing extra sharing maps
* [XXX maybe later] merging with a neighbor
*/
-void vm_map_simplify_entry(map, entry)
+void
+vm_map_simplify_entry(map, entry)
vm_map_t map;
vm_map_entry_t entry;
{
@@ -788,7 +798,8 @@ void vm_map_simplify_entry(map, entry)
* This routine is called only when it is known that
* the entry must be split.
*/
-static void _vm_map_clip_start(map, entry, start)
+static void
+_vm_map_clip_start(map, entry, start)
register vm_map_t map;
register vm_map_entry_t entry;
register vm_offset_t start;
@@ -842,7 +853,8 @@ static void _vm_map_clip_start(map, entry, start)
* This routine is called only when it is known that
* the entry must be split.
*/
-static void _vm_map_clip_end(map, entry, end)
+static void
+_vm_map_clip_end(map, entry, end)
register vm_map_t map;
register vm_map_entry_t entry;
register vm_offset_t end;
@@ -1182,7 +1194,7 @@ vm_map_pageable(map, start, end, new_pageable)
* If a region becomes completely unwired,
* unwire its physical pages and mappings.
*/
- lock_set_recursive(&map->lock);
+ vm_map_set_recursive(&map->lock);
entry = start_entry;
while ((entry != &map->header) && (entry->start < end)) {
@@ -1194,7 +1206,7 @@ vm_map_pageable(map, start, end, new_pageable)
entry = entry->next;
}
- lock_clear_recursive(&map->lock);
+ vm_map_clear_recursive(&map->lock);
}
else {
@@ -1303,8 +1315,8 @@ vm_map_pageable(map, start, end, new_pageable)
vm_map_unlock(map); /* trust me ... */
}
else {
- lock_set_recursive(&map->lock);
- lock_write_to_read(&map->lock);
+ vm_map_set_recursive(&map->lock);
+ lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, curproc);
}
rv = 0;
@@ -1335,7 +1347,7 @@ vm_map_pageable(map, start, end, new_pageable)
vm_map_lock(map);
}
else {
- lock_clear_recursive(&map->lock);
+ vm_map_clear_recursive(&map->lock);
}
if (rv) {
vm_map_unlock(map);
@@ -1450,7 +1462,8 @@ vm_map_clean(map, start, end, syncio, invalidate)
* The map in question should be locked.
* [This is the reason for this routine's existence.]
*/
-void vm_map_entry_unwire(map, entry)
+void
+vm_map_entry_unwire(map, entry)
vm_map_t map;
register vm_map_entry_t entry;
{
@@ -1463,7 +1476,8 @@ void vm_map_entry_unwire(map, entry)
*
* Deallocate the given entry from the target map.
*/
-void vm_map_entry_delete(map, entry)
+void
+vm_map_entry_delete(map, entry)
register vm_map_t map;
register vm_map_entry_t entry;
{
@@ -1609,7 +1623,8 @@ vm_map_remove(map, start, end)
* privilege on the entire address region given.
* The entire region must be allocated.
*/
-boolean_t vm_map_check_protection(map, start, end, protection)
+boolean_t
+vm_map_check_protection(map, start, end, protection)
register vm_map_t map;
register vm_offset_t start;
register vm_offset_t end;
@@ -1659,7 +1674,8 @@ boolean_t vm_map_check_protection(map, start, end, protection)
* Copies the contents of the source entry to the destination
* entry. The entries *must* be aligned properly.
*/
-void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
+void
+vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
vm_map_t src_map, dst_map;
register vm_map_entry_t src_entry, dst_entry;
{
@@ -1855,7 +1871,7 @@ vm_map_copy(dst_map, src_map,
if (src_map == dst_map) {
vm_map_lock(src_map);
}
- else if ((int) src_map < (int) dst_map) {
+ else if ((long) src_map < (long) dst_map) {
vm_map_lock(src_map);
vm_map_lock(dst_map);
} else {
@@ -1984,7 +2000,7 @@ vm_map_copy(dst_map, src_map,
else {
new_src_map = src_map;
new_src_start = src_entry->start;
- lock_set_recursive(&src_map->lock);
+ vm_map_set_recursive(&src_map->lock);
}
if (dst_entry->is_a_map) {
@@ -2022,7 +2038,7 @@ vm_map_copy(dst_map, src_map,
else {
new_dst_map = dst_map;
new_dst_start = dst_entry->start;
- lock_set_recursive(&dst_map->lock);
+ vm_map_set_recursive(&dst_map->lock);
}
/*
@@ -2034,9 +2050,9 @@ vm_map_copy(dst_map, src_map,
FALSE, FALSE);
if (dst_map == new_dst_map)
- lock_clear_recursive(&dst_map->lock);
+ vm_map_clear_recursive(&dst_map->lock);
if (src_map == new_src_map)
- lock_clear_recursive(&src_map->lock);
+ vm_map_clear_recursive(&src_map->lock);
}
/*
@@ -2405,7 +2421,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
* share map to the new object.
*/
- if (lock_read_to_write(&share_map->lock)) {
+ if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
+ (void *)0, curproc)) {
if (share_map != map)
vm_map_unlock_read(map);
goto RetryLookup;
@@ -2418,7 +2435,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
entry->needs_copy = FALSE;
- lock_write_to_read(&share_map->lock);
+ lockmgr(&share_map->lock, LK_DOWNGRADE,
+ (void *)0, curproc);
}
else {
/*
@@ -2435,7 +2453,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
*/
if (entry->object.vm_object == NULL) {
- if (lock_read_to_write(&share_map->lock)) {
+ if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
+ (void *)0, curproc)) {
if (share_map != map)
vm_map_unlock_read(map);
goto RetryLookup;
@@ -2444,7 +2463,7 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
entry->object.vm_object = vm_object_allocate(
(vm_size_t)(entry->end - entry->start));
entry->offset = 0;
- lock_write_to_read(&share_map->lock);
+ lockmgr(&share_map->lock, LK_DOWNGRADE, (void *)0, curproc);
}
/*
@@ -2480,7 +2499,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
* (according to the handle returned by that lookup).
*/
-void vm_map_lookup_done(map, entry)
+void
+vm_map_lookup_done(map, entry)
register vm_map_t map;
vm_map_entry_t entry;
{
@@ -2510,7 +2530,8 @@ void vm_map_lookup_done(map, entry)
* at allocation time because the adjacent entry
* is often wired down.
*/
-void vm_map_simplify(map, start)
+void
+vm_map_simplify(map, start)
vm_map_t map;
vm_offset_t start;
{
@@ -2558,7 +2579,8 @@ void vm_map_simplify(map, start)
/*
* vm_map_print: [ debug ]
*/
-void vm_map_print(map, full)
+void
+vm_map_print(map, full)
register vm_map_t map;
boolean_t full;
{
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index d25b7a2..27bb806 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_map.h 8.3 (Berkeley) 3/15/94
+ * @(#)vm_map.h 8.9 (Berkeley) 5/17/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -159,14 +159,42 @@ typedef struct {
* Perform locking on the data portion of a map.
*/
+#include <sys/proc.h> /* XXX for curproc and p_pid */
+
+#define vm_map_lock_drain_interlock(map) { \
+ lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
+ &(map)->ref_lock, curproc); \
+ (map)->timestamp++; \
+}
+#ifdef DIAGNOSTIC
#define vm_map_lock(map) { \
- lock_write(&(map)->lock); \
+ if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
+ panic("vm_map_lock: failed to get lock"); \
+ } \
(map)->timestamp++; \
}
-#define vm_map_unlock(map) lock_write_done(&(map)->lock)
-#define vm_map_lock_read(map) lock_read(&(map)->lock)
-#define vm_map_unlock_read(map) lock_read_done(&(map)->lock)
-
+#else
+#define vm_map_lock(map) { \
+ lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc); \
+ (map)->timestamp++; \
+}
+#endif /* DIAGNOSTIC */
+#define vm_map_unlock(map) \
+ lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
+#define vm_map_lock_read(map) \
+ lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc)
+#define vm_map_unlock_read(map) \
+ lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
+#define vm_map_set_recursive(map) { \
+ simple_lock(&(map)->lk_interlock); \
+ (map)->lk_flags |= LK_CANRECURSE; \
+ simple_unlock(&(map)->lk_interlock); \
+}
+#define vm_map_clear_recursive(map) { \
+ simple_lock(&(map)->lk_interlock); \
+ (map)->lk_flags &= ~LK_CANRECURSE; \
+ simple_unlock(&(map)->lk_interlock); \
+}
/*
* Functions implemented as macros
*/
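
The vm_map.h hunk above replaces the old lock_read()/lock_write()/lock_*_done() family with the Lite-2 lockmgr() interface: one routine that takes the lock, a request flag (LK_SHARED, LK_EXCLUSIVE, LK_RELEASE, LK_EXCLUPGRADE, LK_DOWNGRADE, LK_DRAIN, plus LK_CANRECURSE on the flags word), an optional interlock, and the current process. The sketch below is editorial, not part of the patch: a toy, single-threaded user-space model of that single-entry-point shape. It only tracks state and never blocks, so it is illustration only; all names are invented.

/*
 * Toy model of the "one entry point plus request code" design that
 * lockmgr(9) introduces; it does no real blocking or interlocking.
 */
#include <assert.h>
#include <stdio.h>

enum toyop { TOY_SHARED, TOY_EXCLUSIVE, TOY_RELEASE };

struct toylock {
	int sharecount;		/* number of shared holders */
	int exclusive;		/* nonzero while held exclusively */
};

static int
toy_lockmgr(struct toylock *lk, enum toyop op)
{
	switch (op) {
	case TOY_SHARED:
		assert(!lk->exclusive);
		lk->sharecount++;
		break;
	case TOY_EXCLUSIVE:
		assert(!lk->exclusive && lk->sharecount == 0);
		lk->exclusive = 1;
		break;
	case TOY_RELEASE:
		if (lk->exclusive)
			lk->exclusive = 0;
		else
			lk->sharecount--;
		break;
	}
	return (0);		/* the real lockmgr() returns an errno value */
}

int
main(void)
{
	struct toylock lk = { 0, 0 };

	toy_lockmgr(&lk, TOY_EXCLUSIVE);	/* cf. vm_map_lock()        */
	toy_lockmgr(&lk, TOY_RELEASE);		/* cf. vm_map_unlock()      */
	toy_lockmgr(&lk, TOY_SHARED);		/* cf. vm_map_lock_read()   */
	toy_lockmgr(&lk, TOY_RELEASE);		/* cf. vm_map_unlock_read() */
	printf("ok\n");
	return (0);
}
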
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 9db6f50..5b74a30 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_meter.c 8.4 (Berkeley) 1/4/94
+ * @(#)vm_meter.c 8.7 (Berkeley) 5/10/95
*/
#include <sys/param.h>
@@ -76,7 +76,7 @@ loadav(avg)
register int i, nrun;
register struct proc *p;
- for (nrun = 0, p = (struct proc *)allproc; p != NULL; p = p->p_next) {
+ for (nrun = 0, p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
switch (p->p_stat) {
case SSLEEP:
if (p->p_priority > PZERO || p->p_slptime != 0)
@@ -152,7 +152,7 @@ vmtotal(totalp)
/*
* Calculate process statistics.
*/
- for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
+ for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (p->p_flag & P_SYSTEM)
continue;
switch (p->p_stat) {
@@ -184,15 +184,25 @@ vmtotal(totalp)
}
/*
* Note active objects.
+ *
+ * XXX don't count shadow objects with no resident pages.
+ * This eliminates the forced shadows caused by MAP_PRIVATE.
+ * Right now we require that such an object completely shadow
+ * the original, to catch just those cases.
*/
paging = 0;
for (map = &p->p_vmspace->vm_map, entry = map->header.next;
entry != &map->header; entry = entry->next) {
if (entry->is_a_map || entry->is_sub_map ||
- entry->object.vm_object == NULL)
+ (object = entry->object.vm_object) == NULL)
continue;
- entry->object.vm_object->flags |= OBJ_ACTIVE;
- paging |= entry->object.vm_object->paging_in_progress;
+ while (object->shadow &&
+ object->resident_page_count == 0 &&
+ object->shadow_offset == 0 &&
+ object->size == object->shadow->size)
+ object = object->shadow;
+ object->flags |= OBJ_ACTIVE;
+ paging |= object->paging_in_progress;
}
if (paging)
totalp->t_pw++;
@@ -220,5 +230,6 @@ vmtotal(totalp)
}
}
}
+ simple_unlock(&vm_object_list_lock);
totalp->t_free = cnt.v_free_count;
}
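
The loadav() and vmtotal() loops above switch from the hand-threaded p_next pointer to the <sys/queue.h> LIST fields (allproc.lh_first / p_list.le_next). Below is an editorial user-space sketch of the same traversal idiom; the structure and variable names are invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int value;
	LIST_ENTRY(node) link;		/* embeds le_next / le_prev */
};

LIST_HEAD(nodelist, node);		/* struct nodelist { ... *lh_first; } */

int
main(void)
{
	struct nodelist head = LIST_HEAD_INITIALIZER(head);
	struct node *n;
	int i;

	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		if (n == NULL)
			return (1);
		n->value = i;
		LIST_INSERT_HEAD(&head, n, link);
	}
	/* Field-level traversal, matching the loops in vm_meter.c above. */
	for (n = head.lh_first; n != NULL; n = n->link.le_next)
		printf("%d\n", n->value);
	return (0);
}
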
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 340cded..63280db 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -37,7 +37,7 @@
*
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
- * @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
+ * @(#)vm_mmap.c 8.10 (Berkeley) 2/19/95
*/
/*
@@ -54,6 +54,9 @@
#include <sys/mman.h>
#include <sys/conf.h>
+#include <sys/mount.h>
+#include <sys/syscallargs.h>
+
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
@@ -67,30 +70,28 @@ int mmapdebug = 0;
#define MDB_MAPIT 0x04
#endif
-struct sbrk_args {
- int incr;
-};
/* ARGSUSED */
int
sbrk(p, uap, retval)
struct proc *p;
- struct sbrk_args *uap;
- int *retval;
+ struct sbrk_args /* {
+ syscallarg(int) incr;
+ } */ *uap;
+ register_t *retval;
{
/* Not yet implemented */
return (EOPNOTSUPP);
}
-struct sstk_args {
- int incr;
-};
/* ARGSUSED */
int
sstk(p, uap, retval)
struct proc *p;
- struct sstk_args *uap;
- int *retval;
+ struct sstk_args /* {
+ syscallarg(int) incr;
+ } */ *uap;
+ register_t *retval;
{
/* Not yet implemented */
@@ -98,15 +99,12 @@ sstk(p, uap, retval)
}
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
-struct getpagesize_args {
- int dummy;
-};
/* ARGSUSED */
int
-ogetpagesize(p, uap, retval)
+compat_43_getpagesize(p, uap, retval)
struct proc *p;
- struct getpagesize_args *uap;
- int *retval;
+ void *uap;
+ register_t *retval;
{
*retval = PAGE_SIZE;
@@ -114,32 +112,29 @@ ogetpagesize(p, uap, retval)
}
#endif /* COMPAT_43 || COMPAT_SUNOS */
-struct mmap_args {
- caddr_t addr;
- size_t len;
- int prot;
- int flags;
- int fd;
- long pad;
- off_t pos;
-};
-
#ifdef COMPAT_43
-struct ommap_args {
- caddr_t addr;
- int len;
- int prot;
- int flags;
- int fd;
- long pos;
-};
int
-ommap(p, uap, retval)
+compat_43_mmap(p, uap, retval)
struct proc *p;
- register struct ommap_args *uap;
- int *retval;
+ register struct compat_43_mmap_args /* {
+ syscallarg(caddr_t) addr;
+ syscallarg(int) len;
+ syscallarg(int) prot;
+ syscallarg(int) flags;
+ syscallarg(int) fd;
+ syscallarg(long) pos;
+ } */ *uap;
+ register_t *retval;
{
- struct mmap_args nargs;
+ struct mmap_args /* {
+ syscallarg(caddr_t) addr;
+ syscallarg(size_t) len;
+ syscallarg(int) prot;
+ syscallarg(int) flags;
+ syscallarg(int) fd;
+ syscallarg(long) pad;
+ syscallarg(off_t) pos;
+ } */ nargs;
static const char cvtbsdprot[8] = {
0,
PROT_EXEC,
@@ -156,24 +151,24 @@ ommap(p, uap, retval)
#define OMAP_FIXED 0x0100
#define OMAP_INHERIT 0x0800
- nargs.addr = uap->addr;
- nargs.len = uap->len;
- nargs.prot = cvtbsdprot[uap->prot&0x7];
- nargs.flags = 0;
- if (uap->flags & OMAP_ANON)
- nargs.flags |= MAP_ANON;
- if (uap->flags & OMAP_COPY)
- nargs.flags |= MAP_COPY;
- if (uap->flags & OMAP_SHARED)
- nargs.flags |= MAP_SHARED;
+ SCARG(&nargs, addr) = SCARG(uap, addr);
+ SCARG(&nargs, len) = SCARG(uap, len);
+ SCARG(&nargs, prot) = cvtbsdprot[SCARG(uap, prot)&0x7];
+ SCARG(&nargs, flags) = 0;
+ if (SCARG(uap, flags) & OMAP_ANON)
+ SCARG(&nargs, flags) |= MAP_ANON;
+ if (SCARG(uap, flags) & OMAP_COPY)
+ SCARG(&nargs, flags) |= MAP_COPY;
+ if (SCARG(uap, flags) & OMAP_SHARED)
+ SCARG(&nargs, flags) |= MAP_SHARED;
else
- nargs.flags |= MAP_PRIVATE;
- if (uap->flags & OMAP_FIXED)
- nargs.flags |= MAP_FIXED;
- if (uap->flags & OMAP_INHERIT)
- nargs.flags |= MAP_INHERIT;
- nargs.fd = uap->fd;
- nargs.pos = uap->pos;
+ SCARG(&nargs, flags) |= MAP_PRIVATE;
+ if (SCARG(uap, flags) & OMAP_FIXED)
+ SCARG(&nargs, flags) |= MAP_FIXED;
+ if (SCARG(uap, flags) & OMAP_INHERIT)
+ SCARG(&nargs, flags) |= MAP_INHERIT;
+ SCARG(&nargs, fd) = SCARG(uap, fd);
+ SCARG(&nargs, pos) = SCARG(uap, pos);
return (mmap(p, &nargs, retval));
}
#endif
@@ -181,35 +176,52 @@ ommap(p, uap, retval)
int
mmap(p, uap, retval)
struct proc *p;
- register struct mmap_args *uap;
- int *retval;
+ register struct mmap_args /* {
+ syscallarg(caddr_t) addr;
+ syscallarg(size_t) len;
+ syscallarg(int) prot;
+ syscallarg(int) flags;
+ syscallarg(int) fd;
+ syscallarg(long) pad;
+ syscallarg(off_t) pos;
+ } */ *uap;
+ register_t *retval;
{
register struct filedesc *fdp = p->p_fd;
register struct file *fp;
struct vnode *vp;
- vm_offset_t addr;
+ vm_offset_t addr, pos;
vm_size_t size;
vm_prot_t prot, maxprot;
caddr_t handle;
int flags, error;
- prot = uap->prot & VM_PROT_ALL;
- flags = uap->flags;
+ prot = SCARG(uap, prot) & VM_PROT_ALL;
+ flags = SCARG(uap, flags);
+ pos = SCARG(uap, pos);
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
- p->p_pid, uap->addr, uap->len, prot,
- flags, uap->fd, (vm_offset_t)uap->pos);
+ p->p_pid, SCARG(uap, addr), SCARG(uap, len), prot,
+ flags, SCARG(uap, fd), pos);
#endif
/*
* Address (if FIXED) must be page aligned.
* Size is implicitly rounded to a page boundary.
+ *
+ * XXX most (all?) vendors require that the file offset be
+ * page aligned as well. However, we already have applications
+ * (e.g. nlist) that rely on unrestricted alignment. Since we
+ * support it, let it happen.
*/
- addr = (vm_offset_t) uap->addr;
+ addr = (vm_offset_t) SCARG(uap, addr);
if (((flags & MAP_FIXED) && (addr & PAGE_MASK)) ||
- (ssize_t)uap->len < 0 || ((flags & MAP_ANON) && uap->fd != -1))
+#if 0
+ ((flags & MAP_ANON) == 0 && (pos & PAGE_MASK)) ||
+#endif
+ (ssize_t)SCARG(uap, len) < 0 || ((flags & MAP_ANON) && SCARG(uap, fd) != -1))
return (EINVAL);
- size = (vm_size_t) round_page(uap->len);
+ size = (vm_size_t) round_page(SCARG(uap, len));
/*
* Check for illegal addresses. Watch out for address wrap...
* Note that VM_*_ADDRESS are not constants due to casts (argh).
@@ -223,13 +235,14 @@ mmap(p, uap, retval)
return (EINVAL);
}
/*
- * XXX if no hint provided for a non-fixed mapping place it after
- * the end of the largest possible heap.
+ * XXX for non-fixed mappings where no hint is provided or
+ * the hint would fall in the potential heap space,
+ * place it after the end of the largest possible heap.
*
* There should really be a pmap call to determine a reasonable
* location.
*/
- if (addr == 0 && (flags & MAP_FIXED) == 0)
+ else if (addr < round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
if (flags & MAP_ANON) {
/*
@@ -237,13 +250,14 @@ mmap(p, uap, retval)
*/
handle = NULL;
maxprot = VM_PROT_ALL;
+ pos = 0;
} else {
/*
* Mapping file, get fp for validation.
* Obtain vnode and make sure it is of appropriate type.
*/
- if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
- (fp = fdp->fd_ofiles[uap->fd]) == NULL)
+ if (((unsigned)SCARG(uap, fd)) >= fdp->fd_nfiles ||
+ (fp = fdp->fd_ofiles[SCARG(uap, fd)]) == NULL)
return (EBADF);
if (fp->f_type != DTYPE_VNODE)
return (EINVAL);
@@ -284,21 +298,20 @@ mmap(p, uap, retval)
}
}
error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
- flags, handle, (vm_offset_t)uap->pos);
+ flags, handle, pos);
if (error == 0)
- *retval = (int)addr;
+ *retval = (register_t)addr;
return (error);
}
-struct msync_args {
- caddr_t addr;
- int len;
-};
int
msync(p, uap, retval)
struct proc *p;
- struct msync_args *uap;
- int *retval;
+ struct msync_args /* {
+ syscallarg(caddr_t) addr;
+ syscallarg(int) len;
+ } */ *uap;
+ register_t *retval;
{
vm_offset_t addr;
vm_size_t size;
@@ -309,13 +322,14 @@ msync(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
printf("msync(%d): addr %x len %x\n",
- p->p_pid, uap->addr, uap->len);
+ p->p_pid, SCARG(uap, addr), SCARG(uap, len));
#endif
- if (((int)uap->addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
+ if (((vm_offset_t)SCARG(uap, addr) & PAGE_MASK) ||
+ SCARG(uap, addr) + SCARG(uap, len) < SCARG(uap, addr))
return (EINVAL);
map = &p->p_vmspace->vm_map;
- addr = (vm_offset_t)uap->addr;
- size = (vm_size_t)uap->len;
+ addr = (vm_offset_t)SCARG(uap, addr);
+ size = (vm_size_t)SCARG(uap, len);
/*
* XXX Gak! If size is zero we are supposed to sync "all modified
* pages with the region containing addr". Unfortunately, we
@@ -330,7 +344,7 @@ msync(p, uap, retval)
vm_map_lock_read(map);
rv = vm_map_lookup_entry(map, addr, &entry);
vm_map_unlock_read(map);
- if (rv)
+ if (!rv)
return (EINVAL);
addr = entry->start;
size = entry->end - entry->start;
@@ -368,15 +382,14 @@ msync(p, uap, retval)
return (0);
}
-struct munmap_args {
- caddr_t addr;
- int len;
-};
int
munmap(p, uap, retval)
register struct proc *p;
- register struct munmap_args *uap;
- int *retval;
+ register struct munmap_args /* {
+ syscallarg(caddr_t) addr;
+ syscallarg(int) len;
+ } */ *uap;
+ register_t *retval;
{
vm_offset_t addr;
vm_size_t size;
@@ -385,13 +398,13 @@ munmap(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
printf("munmap(%d): addr %x len %x\n",
- p->p_pid, uap->addr, uap->len);
+ p->p_pid, SCARG(uap, addr), SCARG(uap, len));
#endif
- addr = (vm_offset_t) uap->addr;
- if ((addr & PAGE_MASK) || uap->len < 0)
+ addr = (vm_offset_t) SCARG(uap, addr);
+ if ((addr & PAGE_MASK) || SCARG(uap, len) < 0)
return(EINVAL);
- size = (vm_size_t) round_page(uap->len);
+ size = (vm_size_t) round_page(SCARG(uap, len));
if (size == 0)
return(0);
/*
@@ -407,39 +420,42 @@ munmap(p, uap, retval)
map = &p->p_vmspace->vm_map;
/*
* Make sure entire range is allocated.
+ * XXX this seemed overly restrictive, so we relaxed it.
*/
+#if 0
if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
return(EINVAL);
+#endif
/* returns nothing but KERN_SUCCESS anyway */
(void) vm_map_remove(map, addr, addr+size);
return(0);
}
void
-munmapfd(fd)
+munmapfd(p, fd)
+ struct proc *p;
int fd;
{
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
- printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
+ printf("munmapfd(%d): fd %d\n", p->p_pid, fd);
#endif
/*
* XXX should vm_deallocate any regions mapped to this file
*/
- curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
+ p->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}
-struct mprotect_args {
- caddr_t addr;
- int len;
- int prot;
-};
int
mprotect(p, uap, retval)
struct proc *p;
- struct mprotect_args *uap;
- int *retval;
+ struct mprotect_args /* {
+ syscallarg(caddr_t) addr;
+ syscallarg(int) len;
+ syscallarg(int) prot;
+ } */ *uap;
+ register_t *retval;
{
vm_offset_t addr;
vm_size_t size;
@@ -448,14 +464,14 @@ mprotect(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
printf("mprotect(%d): addr %x len %x prot %d\n",
- p->p_pid, uap->addr, uap->len, uap->prot);
+ p->p_pid, SCARG(uap, addr), SCARG(uap, len), SCARG(uap, prot));
#endif
- addr = (vm_offset_t)uap->addr;
- if ((addr & PAGE_MASK) || uap->len < 0)
+ addr = (vm_offset_t)SCARG(uap, addr);
+ if ((addr & PAGE_MASK) || SCARG(uap, len) < 0)
return(EINVAL);
- size = (vm_size_t)uap->len;
- prot = uap->prot & VM_PROT_ALL;
+ size = (vm_size_t)SCARG(uap, len);
+ prot = SCARG(uap, prot) & VM_PROT_ALL;
switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
FALSE)) {
@@ -467,49 +483,46 @@ mprotect(p, uap, retval)
return (EINVAL);
}
-struct madvise_args {
- caddr_t addr;
- int len;
- int behav;
-};
/* ARGSUSED */
int
madvise(p, uap, retval)
struct proc *p;
- struct madvise_args *uap;
- int *retval;
+ struct madvise_args /* {
+ syscallarg(caddr_t) addr;
+ syscallarg(int) len;
+ syscallarg(int) behav;
+ } */ *uap;
+ register_t *retval;
{
/* Not yet implemented */
return (EOPNOTSUPP);
}
-struct mincore_args {
- caddr_t addr;
- int len;
- char *vec;
-};
/* ARGSUSED */
int
mincore(p, uap, retval)
struct proc *p;
- struct mincore_args *uap;
- int *retval;
+ struct mincore_args /* {
+ syscallarg(caddr_t) addr;
+ syscallarg(int) len;
+ syscallarg(char *) vec;
+ } */ *uap;
+ register_t *retval;
{
/* Not yet implemented */
return (EOPNOTSUPP);
}
-struct mlock_args {
- caddr_t addr;
- size_t len;
-};
int
mlock(p, uap, retval)
struct proc *p;
- struct mlock_args *uap;
- int *retval;
+ struct mlock_args /* {
+ syscallarg(caddr_t) addr;
+ syscallarg(size_t) len;
+ } */ *uap;
+ register_t *retval;
{
vm_offset_t addr;
vm_size_t size;
@@ -519,12 +532,12 @@ mlock(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
printf("mlock(%d): addr %x len %x\n",
- p->p_pid, uap->addr, uap->len);
+ p->p_pid, SCARG(uap, addr), SCARG(uap, len));
#endif
- addr = (vm_offset_t)uap->addr;
- if ((addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
+ addr = (vm_offset_t)SCARG(uap, addr);
+ if ((addr & PAGE_MASK) || SCARG(uap, addr) + SCARG(uap, len) < SCARG(uap, addr))
return (EINVAL);
- size = round_page((vm_size_t)uap->len);
+ size = round_page((vm_size_t)SCARG(uap, len));
if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
return (EAGAIN);
#ifdef pmap_wired_count
@@ -540,15 +553,14 @@ mlock(p, uap, retval)
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
-struct munlock_args {
- caddr_t addr;
- size_t len;
-};
int
munlock(p, uap, retval)
struct proc *p;
- struct munlock_args *uap;
- int *retval;
+ struct munlock_args /* {
+ syscallarg(caddr_t) addr;
+ syscallarg(size_t) len;
+ } */ *uap;
+ register_t *retval;
{
vm_offset_t addr;
vm_size_t size;
@@ -557,16 +569,16 @@ munlock(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
printf("munlock(%d): addr %x len %x\n",
- p->p_pid, uap->addr, uap->len);
+ p->p_pid, SCARG(uap, addr), SCARG(uap, len));
#endif
- addr = (vm_offset_t)uap->addr;
- if ((addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
+ addr = (vm_offset_t)SCARG(uap, addr);
+ if ((addr & PAGE_MASK) || SCARG(uap, addr) + SCARG(uap, len) < SCARG(uap, addr))
return (EINVAL);
#ifndef pmap_wired_count
if (error = suser(p->p_ucred, &p->p_acflag))
return (error);
#endif
- size = round_page((vm_size_t)uap->len);
+ size = round_page((vm_size_t)SCARG(uap, len));
error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
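
The syscall stubs above move from ad-hoc argument structures to the syscallarg()/SCARG() convention, which wraps each argument so it can be padded to a register-sized slot and fetched through a single macro. The sketch below is a simplified user-space stand-in; the macro bodies here are invented for illustration and differ from the real <sys/syscallargs.h>.

#include <stdio.h>
#include <stddef.h>

typedef long toy_register_t;

/* Simplified stand-ins for syscallarg() and SCARG(). */
#define toy_syscallarg(t)	union { toy_register_t pad_; t datum_; }
#define TOY_SCARG(p, field)	((p)->field.datum_)

struct toy_mmap_args {
	toy_syscallarg(void *) addr;
	toy_syscallarg(size_t) len;
	toy_syscallarg(int)    prot;
};

int
main(void)
{
	struct toy_mmap_args a;

	TOY_SCARG(&a, addr) = NULL;
	TOY_SCARG(&a, len)  = 4096;
	TOY_SCARG(&a, prot) = 3;
	printf("len=%zu prot=%d, each argument slot is %zu bytes\n",
	    TOY_SCARG(&a, len), TOY_SCARG(&a, prot), sizeof a.len);
	return (0);
}
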
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index d11fa8b..dbd01e7 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_object.c 8.5 (Berkeley) 3/22/94
+ * @(#)vm_object.c 8.7 (Berkeley) 5/11/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -117,7 +117,8 @@ static void _vm_object_allocate __P((vm_size_t, vm_object_t));
*
* Initialize the VM objects module.
*/
-void vm_object_init(size)
+void
+vm_object_init(size)
vm_size_t size;
{
register int i;
@@ -144,7 +145,8 @@ void vm_object_init(size)
* Returns a new object with the given size.
*/
-vm_object_t vm_object_allocate(size)
+vm_object_t
+vm_object_allocate(size)
vm_size_t size;
{
register vm_object_t result;
@@ -192,7 +194,8 @@ _vm_object_allocate(size, object)
*
* Gets another reference to the given object.
*/
-void vm_object_reference(object)
+void
+vm_object_reference(object)
register vm_object_t object;
{
if (object == NULL)
@@ -214,7 +217,8 @@ void vm_object_reference(object)
*
* No object may be locked.
*/
-void vm_object_deallocate(object)
+void
+vm_object_deallocate(object)
register vm_object_t object;
{
vm_object_t temp;
@@ -284,7 +288,8 @@ void vm_object_deallocate(object)
*
* The object must be locked.
*/
-void vm_object_terminate(object)
+void
+vm_object_terminate(object)
register vm_object_t object;
{
register vm_page_t p;
@@ -309,7 +314,7 @@ void vm_object_terminate(object)
* Wait until the pageout daemon is through with the object.
*/
while (object->paging_in_progress) {
- vm_object_sleep((int)object, object, FALSE);
+ vm_object_sleep(object, object, FALSE);
vm_object_lock(object);
}
@@ -319,10 +324,8 @@ void vm_object_terminate(object)
*
* XXX need to do something in the event of a cleaning error.
*/
- if ((object->flags & OBJ_INTERNAL) == 0) {
+ if ((object->flags & OBJ_INTERNAL) == 0)
(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
- vm_object_unlock(object);
- }
/*
* Now free the pages.
@@ -335,8 +338,7 @@ void vm_object_terminate(object)
cnt.v_pfree++;
vm_page_unlock_queues();
}
- if ((object->flags & OBJ_INTERNAL) == 0)
- vm_object_unlock(object);
+ vm_object_unlock(object);
/*
* Let the pager know object is dead.
@@ -414,7 +416,7 @@ again:
* Wait until the pageout daemon is through with the object.
*/
while (object->paging_in_progress) {
- vm_object_sleep((int)object, object, FALSE);
+ vm_object_sleep(object, object, FALSE);
vm_object_lock(object);
}
/*
@@ -548,7 +550,8 @@ vm_object_cache_trim()
*
* The object must *not* be locked.
*/
-void vm_object_pmap_copy(object, start, end)
+void
+vm_object_pmap_copy(object, start, end)
register vm_object_t object;
register vm_offset_t start;
register vm_offset_t end;
@@ -576,7 +579,8 @@ void vm_object_pmap_copy(object, start, end)
*
* The object must *not* be locked.
*/
-void vm_object_pmap_remove(object, start, end)
+void
+vm_object_pmap_remove(object, start, end)
register vm_object_t object;
register vm_offset_t start;
register vm_offset_t end;
@@ -604,7 +608,8 @@ void vm_object_pmap_remove(object, start, end)
* May defer the copy until later if the object is not backed
* up by a non-default pager.
*/
-void vm_object_copy(src_object, src_offset, size,
+void
+vm_object_copy(src_object, src_offset, size,
dst_object, dst_offset, src_needs_copy)
register vm_object_t src_object;
vm_offset_t src_offset;
@@ -798,7 +803,8 @@ void vm_object_copy(src_object, src_offset, size,
* are returned in the source parameters.
*/
-void vm_object_shadow(object, offset, length)
+void
+vm_object_shadow(object, offset, length)
vm_object_t *object; /* IN/OUT */
vm_offset_t *offset; /* IN/OUT */
vm_size_t length;
@@ -843,7 +849,8 @@ void vm_object_shadow(object, offset, length)
* Set the specified object's pager to the specified pager.
*/
-void vm_object_setpager(object, pager, paging_offset,
+void
+vm_object_setpager(object, pager, paging_offset,
read_only)
vm_object_t object;
vm_pager_t pager;
@@ -865,14 +872,15 @@ void vm_object_setpager(object, pager, paging_offset,
*/
#define vm_object_hash(pager) \
- (((unsigned)pager)%VM_OBJECT_HASH_COUNT)
+ (((unsigned long)pager)%VM_OBJECT_HASH_COUNT)
/*
* vm_object_lookup looks in the object cache for an object with the
* specified pager and paging id.
*/
-vm_object_t vm_object_lookup(pager)
+vm_object_t
+vm_object_lookup(pager)
vm_pager_t pager;
{
register vm_object_hash_entry_t entry;
@@ -907,7 +915,8 @@ vm_object_t vm_object_lookup(pager)
* the hash table.
*/
-void vm_object_enter(object, pager)
+void
+vm_object_enter(object, pager)
vm_object_t object;
vm_pager_t pager;
{
@@ -969,8 +978,8 @@ vm_object_remove(pager)
* vm_object_cache_clear removes all objects from the cache.
*
*/
-
-void vm_object_cache_clear()
+void
+vm_object_cache_clear()
{
register vm_object_t object;
@@ -1009,7 +1018,8 @@ boolean_t vm_object_collapse_allowed = TRUE;
* queues be unlocked.
*
*/
-void vm_object_collapse(object)
+void
+vm_object_collapse(object)
register vm_object_t object;
{
@@ -1277,7 +1287,8 @@ void vm_object_collapse(object)
*
* The object must be locked.
*/
-void vm_object_page_remove(object, start, end)
+void
+vm_object_page_remove(object, start, end)
register vm_object_t object;
register vm_offset_t start;
register vm_offset_t end;
@@ -1320,7 +1331,8 @@ void vm_object_page_remove(object, start, end)
* Conditions:
* The object must *not* be locked.
*/
-boolean_t vm_object_coalesce(prev_object, next_object,
+boolean_t
+vm_object_coalesce(prev_object, next_object,
prev_offset, next_offset,
prev_size, next_size)
@@ -1391,7 +1403,8 @@ boolean_t vm_object_coalesce(prev_object, next_object,
/*
* vm_object_print: [ debug ]
*/
-void vm_object_print(object, full)
+void
+vm_object_print(object, full)
vm_object_t object;
boolean_t full;
{
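
vm_object_hash() above (and vm_page_hash() further down) now cast the pointer through unsigned long rather than unsigned before taking the modulus. The point is pointer width: on an LP64 ABI, unsigned is 32 bits while pointers and unsigned long are 64, so the old cast silently discarded the upper half of the address. A small hypothetical demonstration, assuming an LP64 platform:

#include <stdio.h>
#include <stdint.h>

#define HASH_COUNT 157		/* arbitrary bucket count for illustration */

int
main(void)
{
	int x;
	void *p = &x;

	unsigned      narrow = (unsigned)(uintptr_t)p;	/* may truncate  */
	unsigned long wide   = (unsigned long)(uintptr_t)p;

	printf("sizeof(unsigned)=%zu sizeof(unsigned long)=%zu sizeof(void *)=%zu\n",
	    sizeof(unsigned), sizeof(unsigned long), sizeof(void *));
	printf("narrow %% %d = %u, wide %% %d = %lu\n",
	    HASH_COUNT, narrow % HASH_COUNT, HASH_COUNT, wide % HASH_COUNT);
	return (0);
}
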
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 5e220ac..f46a355 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_object.h 8.3 (Berkeley) 1/12/94
+ * @(#)vm_object.h 8.4 (Berkeley) 1/9/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -108,7 +108,7 @@ TAILQ_HEAD(vm_object_hash_head, vm_object_hash_entry);
struct vm_object_hash_entry {
TAILQ_ENTRY(vm_object_hash_entry) hash_links; /* hash chain links */
- vm_object_t object; /* object represened */
+ vm_object_t object; /* object represented */
};
typedef struct vm_object_hash_entry *vm_object_hash_entry_t;
@@ -137,7 +137,7 @@ vm_object_t kmem_object;
#define vm_object_unlock(object) simple_unlock(&(object)->Lock)
#define vm_object_lock_try(object) simple_lock_try(&(object)->Lock)
#define vm_object_sleep(event, object, interruptible) \
- thread_sleep((event), &(object)->Lock, (interruptible))
+ thread_sleep((event), &(object)->Lock, (interruptible))
#ifdef KERNEL
vm_object_t vm_object_allocate __P((vm_size_t));
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 0cd9d87..6b5574b 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
+ * @(#)vm_page.c 8.4 (Berkeley) 1/9/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -110,7 +110,8 @@ int page_shift;
*
* Sets page_shift and page_mask from cnt.v_page_size.
*/
-void vm_set_page_size()
+void
+vm_set_page_size()
{
if (cnt.v_page_size == 0)
@@ -133,7 +134,8 @@ void vm_set_page_size()
* for the object/offset-to-page hash table headers.
* Each page cell is initialized and placed on the free list.
*/
-void vm_page_startup(start, end)
+void
+vm_page_startup(start, end)
vm_offset_t *start;
vm_offset_t *end;
{
@@ -290,7 +292,7 @@ void vm_page_startup(start, end)
* NOTE: This macro depends on vm_page_bucket_count being a power of 2.
*/
#define vm_page_hash(object, offset) \
- (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
+ (((unsigned long)object+(unsigned long)atop(offset))&vm_page_hash_mask)
/*
* vm_page_insert: [ internal use only ]
@@ -301,7 +303,8 @@ void vm_page_startup(start, end)
* The object and page must be locked.
*/
-void vm_page_insert(mem, object, offset)
+void
+vm_page_insert(mem, object, offset)
register vm_page_t mem;
register vm_object_t object;
register vm_offset_t offset;
@@ -357,7 +360,8 @@ void vm_page_insert(mem, object, offset)
* The object and page must be locked.
*/
-void vm_page_remove(mem)
+void
+vm_page_remove(mem)
register vm_page_t mem;
{
register struct pglist *bucket;
@@ -404,7 +408,8 @@ void vm_page_remove(mem)
* The object must be locked. No side effects.
*/
-vm_page_t vm_page_lookup(object, offset)
+vm_page_t
+vm_page_lookup(object, offset)
register vm_object_t object;
register vm_offset_t offset;
{
@@ -442,7 +447,8 @@ vm_page_t vm_page_lookup(object, offset)
*
* The object must be locked.
*/
-void vm_page_rename(mem, new_object, new_offset)
+void
+vm_page_rename(mem, new_object, new_offset)
register vm_page_t mem;
register vm_object_t new_object;
vm_offset_t new_offset;
@@ -465,7 +471,8 @@ void vm_page_rename(mem, new_object, new_offset)
*
* Object must be locked.
*/
-vm_page_t vm_page_alloc(object, offset)
+vm_page_t
+vm_page_alloc(object, offset)
vm_object_t object;
vm_offset_t offset;
{
@@ -503,7 +510,7 @@ vm_page_t vm_page_alloc(object, offset)
if (cnt.v_free_count < cnt.v_free_min ||
(cnt.v_free_count < cnt.v_free_target &&
cnt.v_inactive_count < cnt.v_inactive_target))
- thread_wakeup((int)&vm_pages_needed);
+ thread_wakeup(&vm_pages_needed);
return (mem);
}
@@ -515,7 +522,8 @@ vm_page_t vm_page_alloc(object, offset)
*
* Object and page must be locked prior to entry.
*/
-void vm_page_free(mem)
+void
+vm_page_free(mem)
register vm_page_t mem;
{
vm_page_remove(mem);
@@ -553,7 +561,8 @@ void vm_page_free(mem)
*
* The page queues must be locked.
*/
-void vm_page_wire(mem)
+void
+vm_page_wire(mem)
register vm_page_t mem;
{
VM_PAGE_CHECK(mem);
@@ -582,7 +591,8 @@ void vm_page_wire(mem)
*
* The page queues must be locked.
*/
-void vm_page_unwire(mem)
+void
+vm_page_unwire(mem)
register vm_page_t mem;
{
VM_PAGE_CHECK(mem);
@@ -605,7 +615,8 @@ void vm_page_unwire(mem)
*
* The page queues must be locked.
*/
-void vm_page_deactivate(m)
+void
+vm_page_deactivate(m)
register vm_page_t m;
{
VM_PAGE_CHECK(m);
@@ -640,7 +651,8 @@ void vm_page_deactivate(m)
* The page queues must be locked.
*/
-void vm_page_activate(m)
+void
+vm_page_activate(m)
register vm_page_t m;
{
VM_PAGE_CHECK(m);
@@ -668,7 +680,8 @@ void vm_page_activate(m)
* be used by the zero-fill object.
*/
-boolean_t vm_page_zero_fill(m)
+boolean_t
+vm_page_zero_fill(m)
vm_page_t m;
{
VM_PAGE_CHECK(m);
@@ -684,7 +697,8 @@ boolean_t vm_page_zero_fill(m)
* Copy one page to another
*/
-void vm_page_copy(src_m, dest_m)
+void
+vm_page_copy(src_m, dest_m)
vm_page_t src_m;
vm_page_t dest_m;
{
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 8bf5146..f9bf115 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_page.h 8.2 (Berkeley) 12/13/93
+ * @(#)vm_page.h 8.3 (Berkeley) 1/9/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -202,14 +202,14 @@ simple_lock_data_t vm_page_queue_free_lock;
#define PAGE_ASSERT_WAIT(m, interruptible) { \
(m)->flags |= PG_WANTED; \
- assert_wait((int) (m), (interruptible)); \
+ assert_wait((m), (interruptible)); \
}
#define PAGE_WAKEUP(m) { \
(m)->flags &= ~PG_BUSY; \
if ((m)->flags & PG_WANTED) { \
(m)->flags &= ~PG_WANTED; \
- thread_wakeup((int) (m)); \
+ thread_wakeup((m)); \
} \
}
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 6795405..c312fa6 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_pageout.c 8.5 (Berkeley) 2/14/94
+ * @(#)vm_pageout.c 8.7 (Berkeley) 6/19/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -202,7 +202,7 @@ vm_pageout_scan()
else
#endif
vm_pageout_page(m, object);
- thread_wakeup((int) object);
+ thread_wakeup(object);
vm_object_unlock(object);
/*
* Former next page may no longer even be on the inactive
@@ -276,7 +276,7 @@ vm_pageout_page(m, object)
/*
* Do a wakeup here in case the following operations block.
*/
- thread_wakeup((int) &cnt.v_free_count);
+ thread_wakeup(&cnt.v_free_count);
/*
* If there is no pager for the page, use the default pager.
@@ -321,7 +321,11 @@ vm_pageout_page(m, object)
* shortage, so we put pause for awhile and try again.
* XXX could get stuck here.
*/
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
(void) tsleep((caddr_t)&lbolt, PZERO|PCATCH, "pageout", 0);
+ vm_object_lock(object);
+ vm_page_lock_queues();
break;
}
case VM_PAGER_FAIL:
@@ -440,7 +444,7 @@ vm_pageout_cluster(m, object)
object->paging_in_progress++;
vm_object_unlock(object);
again:
- thread_wakeup((int) &cnt.v_free_count);
+ thread_wakeup(&cnt.v_free_count);
postatus = vm_pager_put_pages(object->pager, plistp, count, FALSE);
/*
* XXX rethink this
@@ -505,7 +509,8 @@ again:
* vm_pageout is the high level pageout daemon.
*/
-void vm_pageout()
+void
+vm_pageout()
{
(void) spl0();
@@ -540,8 +545,7 @@ void vm_pageout()
simple_lock(&vm_pages_needed_lock);
while (TRUE) {
- thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
- FALSE);
+ thread_sleep(&vm_pages_needed, &vm_pages_needed_lock, FALSE);
/*
* Compute the inactive target for this scan.
* We need to keep a reasonable amount of memory in the
@@ -562,6 +566,6 @@ void vm_pageout()
vm_pageout_scan();
vm_pager_sync();
simple_lock(&vm_pages_needed_lock);
- thread_wakeup((int) &cnt.v_free_count);
+ thread_wakeup(&cnt.v_free_count);
}
}
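
The vm_pageout_page() hunk above drops the page-queue and object locks around the tsleep() on lbolt and reacquires them afterwards, so the daemon never sleeps while holding them. A user-space sketch of that unlock/sleep/relock shape, using a POSIX mutex purely for illustration (names invented):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;

static void
wait_for_pager(void)
{
	pthread_mutex_lock(&object_lock);
	/* ... decide that we must wait ... */
	pthread_mutex_unlock(&object_lock);	/* cf. vm_object_unlock()  */
	sleep(1);				/* cf. tsleep(&lbolt, ...) */
	pthread_mutex_lock(&object_lock);	/* cf. vm_object_lock()    */
	/* ... re-examine state; it may have changed while we slept ... */
	pthread_mutex_unlock(&object_lock);
}

int
main(void)
{
	wait_for_pager();
	printf("done\n");
	return (0);
}
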
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index a82a0ea..087cf54 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_pageout.h 8.2 (Berkeley) 1/12/94
+ * @(#)vm_pageout.h 8.3 (Berkeley) 1/9/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -84,8 +84,8 @@ simple_lock_data_t vm_pages_needed_lock;
#define VM_WAIT { \
simple_lock(&vm_pages_needed_lock); \
- thread_wakeup((int)&vm_pages_needed); \
- thread_sleep((int)&cnt.v_free_count, \
+ thread_wakeup(&vm_pages_needed); \
+ thread_sleep(&cnt.v_free_count, \
&vm_pages_needed_lock, FALSE); \
}
#ifdef KERNEL
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 7123abb..dd74297 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_pager.c 8.6 (Berkeley) 1/12/94
+ * @(#)vm_pager.c 8.7 (Berkeley) 7/7/94
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -207,6 +207,26 @@ vm_pager_put_pages(pager, mlist, npages, sync)
return ((*pager->pg_ops->pgo_putpages)(pager, mlist, npages, sync));
}
+/* XXX compatibility*/
+int
+vm_pager_get(pager, m, sync)
+ vm_pager_t pager;
+ vm_page_t m;
+ boolean_t sync;
+{
+ return vm_pager_get_pages(pager, &m, 1, sync);
+}
+
+/* XXX compatibility*/
+int
+vm_pager_put(pager, m, sync)
+ vm_pager_t pager;
+ vm_page_t m;
+ boolean_t sync;
+{
+ return vm_pager_put_pages(pager, &m, 1, sync);
+}
+
boolean_t
vm_pager_has_page(pager, offset)
vm_pager_t pager;
@@ -240,7 +260,7 @@ vm_pager_cluster(pager, offset, loff, hoff)
{
if (pager == NULL)
panic("vm_pager_cluster: null pager");
- return ((*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff));
+ ((*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff));
}
void
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index e4659c2..ffd8eb0 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -35,7 +35,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_pager.h 8.4 (Berkeley) 1/12/94
+ * @(#)vm_pager.h 8.5 (Berkeley) 7/7/94
*/
/*
@@ -130,18 +130,8 @@ void vm_pager_unmap_pages __P((vm_offset_t, int));
/*
* XXX compat with old interface
*/
-#define vm_pager_get(p, m, s) \
-({ \
- vm_page_t ml[1]; \
- ml[0] = (m); \
- vm_pager_get_pages(p, ml, 1, s); \
-})
-#define vm_pager_put(p, m, s) \
-({ \
- vm_page_t ml[1]; \
- ml[0] = (m); \
- vm_pager_put_pages(p, ml, 1, s); \
-})
+int vm_pager_get __P((vm_pager_t, vm_page_t, boolean_t));
+int vm_pager_put __P((vm_pager_t, vm_page_t, boolean_t));
#endif
#endif /* _VM_PAGER_ */
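
The vm_pager.h hunk removes the vm_pager_get()/vm_pager_put() macros, which relied on the GNU C statement-expression extension ("({ ... })"), in favor of the ordinary functions added in vm_pager.c above that call the multi-page entry points with a one-element list. A minimal editorial sketch of that wrapper pattern, with invented names:

#include <stdio.h>

static int
process_items(int *items, int n)	/* stands in for the *_pages() call */
{
	int i, sum;

	for (i = 0, sum = 0; i < n; i++)
		sum += items[i];
	return (sum);
}

static int
process_one(int x)			/* cf. the new single-page wrappers */
{
	return (process_items(&x, 1));
}

int
main(void)
{
	printf("%d\n", process_one(42));
	return (0);
}
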
diff --git a/sys/vm/vm_param.h b/sys/vm/vm_param.h
index 2d2c715..81d3d12 100644
--- a/sys/vm/vm_param.h
+++ b/sys/vm/vm_param.h
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_param.h 8.1 (Berkeley) 6/11/93
+ * @(#)vm_param.h 8.2 (Berkeley) 1/9/95
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -82,7 +82,7 @@ typedef int boolean_t;
* The machine independent pages are refered to as PAGES. A page
* is some number of hardware pages, depending on the target machine.
*/
-#define DEFAULT_PAGE_SIZE 4096
+#define DEFAULT_PAGE_SIZE 4096
/*
* All references to the size of a page should be done with PAGE_SIZE
@@ -90,8 +90,8 @@ typedef int boolean_t;
* we can easily make them constant if we so desire.
*/
#define PAGE_SIZE cnt.v_page_size /* size of page */
-#define PAGE_MASK page_mask /* size of page - 1 */
-#define PAGE_SHIFT page_shift /* bits to shift for pages */
+#define PAGE_MASK page_mask /* size of page - 1 */
+#define PAGE_SHIFT page_shift /* bits to shift for pages */
#ifdef KERNEL
extern vm_size_t page_mask;
extern int page_shift;
@@ -100,11 +100,11 @@ extern int page_shift;
/*
* CTL_VM identifiers
*/
-#define VM_METER 1 /* struct vmmeter */
-#define VM_LOADAVG 2 /* struct loadavg */
-#define VM_MAXID 3 /* number of valid vm ids */
+#define VM_METER 1 /* struct vmmeter */
+#define VM_LOADAVG 2 /* struct loadavg */
+#define VM_MAXID 3 /* number of valid vm ids */
-#define CTL_VM_NAMES { \
+#define CTL_VM_NAMES { \
{ 0, 0 }, \
{ "vmmeter", CTLTYPE_STRUCT }, \
{ "loadavg", CTLTYPE_STRUCT }, \
@@ -129,18 +129,18 @@ extern int page_shift;
* No rounding is used.
*/
#ifdef KERNEL
-#define atop(x) (((unsigned)(x)) >> PAGE_SHIFT)
+#define atop(x) (((unsigned long)(x)) >> PAGE_SHIFT)
#define ptoa(x) ((vm_offset_t)((x) << PAGE_SHIFT))
/*
* Round off or truncate to the nearest page. These will work
* for either addresses or counts (i.e., 1 byte rounds to 1 page).
*/
-#define round_page(x) \
+#define round_page(x) \
((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) & ~PAGE_MASK))
-#define trunc_page(x) \
+#define trunc_page(x) \
((vm_offset_t)(((vm_offset_t)(x)) & ~PAGE_MASK))
-#define num_pages(x) \
+#define num_pages(x) \
((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) >> PAGE_SHIFT))
extern vm_size_t mem_size; /* size of physical memory (bytes) */
@@ -150,7 +150,8 @@ extern vm_offset_t last_addr; /* last physical page */
#else
/* out-of-kernel versions of round_page and trunc_page */
#define round_page(x) \
- ((((vm_offset_t)(x) + (vm_page_size - 1)) / vm_page_size) * vm_page_size)
+ ((((vm_offset_t)(x) + (vm_page_size - 1)) / vm_page_size) * \
+ vm_page_size)
#define trunc_page(x) \
((((vm_offset_t)(x)) / vm_page_size) * vm_page_size)
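
round_page() and trunc_page() above are plain mask arithmetic against PAGE_MASK (page size minus one). A tiny standalone illustration with a fixed 4096-byte page, names invented:

#include <stdio.h>

#define PGSIZE	4096UL
#define PGMASK	(PGSIZE - 1)
#define ROUND_PAGE(x)	(((x) + PGMASK) & ~PGMASK)
#define TRUNC_PAGE(x)	((x) & ~PGMASK)

int
main(void)
{
	unsigned long v = 5000;

	/* prints: round_page(5000) = 8192, trunc_page(5000) = 4096 */
	printf("round_page(%lu) = %lu, trunc_page(%lu) = %lu\n",
	    v, ROUND_PAGE(v), v, TRUNC_PAGE(v));
	return (0);
}
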
diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c
index 3d49ea7..0902185 100644
--- a/sys/vm/vm_unix.c
+++ b/sys/vm/vm_unix.c
@@ -37,7 +37,7 @@
*
* from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
*
- * @(#)vm_unix.c 8.1 (Berkeley) 6/11/93
+ * @(#)vm_unix.c 8.2 (Berkeley) 1/9/95
*/
/*
@@ -97,7 +97,7 @@ obreak(p, uap, retval)
int
grow(p, sp)
struct proc *p;
- unsigned sp;
+ vm_offset_t sp;
{
register struct vmspace *vm = p->p_vmspace;
register int si;
@@ -105,7 +105,7 @@ grow(p, sp)
/*
* For user defined stacks (from sendsig).
*/
- if (sp < (unsigned)vm->vm_maxsaddr)
+ if (sp < (vm_offset_t)vm->vm_maxsaddr)
return (0);
/*
* For common case of already allocated (from trap).
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 9c2f826..993fd8a 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -35,7 +35,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vnode_pager.c 8.8 (Berkeley) 2/13/94
+ * @(#)vnode_pager.c 8.10 (Berkeley) 5/14/95
*/
/*
@@ -277,7 +277,8 @@ vnode_pager_haspage(pager, offset)
vm_pager_t pager;
vm_offset_t offset;
{
- register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
+ struct proc *p = curproc; /* XXX */
+ vn_pager_t vnp = (vn_pager_t)pager->pg_data;
daddr_t bn;
int err;
@@ -291,9 +292,9 @@ vnode_pager_haspage(pager, offset)
* Lock the vnode first to make sure we have the most recent
* version of the size.
*/
- VOP_LOCK(vnp->vnp_vp);
+ vn_lock(vnp->vnp_vp, LK_EXCLUSIVE | LK_RETRY, p);
if (offset >= vnp->vnp_size) {
- VOP_UNLOCK(vnp->vnp_vp);
+ VOP_UNLOCK(vnp->vnp_vp, 0, p);
#ifdef DEBUG
if (vpagerdebug & (VDB_FAIL|VDB_SIZE))
printf("vnode_pager_haspage: pg %x, off %x, size %x\n",
@@ -312,7 +313,7 @@ vnode_pager_haspage(pager, offset)
err = VOP_BMAP(vnp->vnp_vp,
offset / vnp->vnp_vp->v_mount->mnt_stat.f_iosize,
(struct vnode **)0, &bn, NULL);
- VOP_UNLOCK(vnp->vnp_vp);
+ VOP_UNLOCK(vnp->vnp_vp, 0, p);
if (err) {
#ifdef DEBUG
if (vpagerdebug & VDB_FAIL)
@@ -422,7 +423,8 @@ void
vnode_pager_umount(mp)
register struct mount *mp;
{
- register vm_pager_t pager, npager;
+ struct proc *p = curproc; /* XXX */
+ vm_pager_t pager, npager;
struct vnode *vp;
for (pager = vnode_pager_list.tqh_first; pager != NULL; pager = npager){
@@ -433,9 +435,9 @@ vnode_pager_umount(mp)
npager = pager->pg_list.tqe_next;
vp = ((vn_pager_t)pager->pg_data)->vnp_vp;
if (mp == (struct mount *)0 || vp->v_mount == mp) {
- VOP_LOCK(vp);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
(void) vnode_pager_uncache(vp);
- VOP_UNLOCK(vp);
+ VOP_UNLOCK(vp, 0, p);
}
}
}
@@ -452,15 +454,15 @@ boolean_t
vnode_pager_uncache(vp)
register struct vnode *vp;
{
- register vm_object_t object;
+ struct proc *p = curproc; /* XXX */
+ vm_object_t object;
boolean_t uncached;
vm_pager_t pager;
/*
* Not a mapped vnode
*/
- pager = (vm_pager_t)vp->v_vmdata;
- if (pager == NULL)
+ if (vp->v_type != VREG || (pager = (vm_pager_t)vp->v_vmdata) == NULL)
return (TRUE);
#ifdef DEBUG
if (!VOP_ISLOCKED(vp)) {
@@ -477,9 +479,9 @@ vnode_pager_uncache(vp)
object = vm_object_lookup(pager);
if (object) {
uncached = (object->ref_count <= 1);
- VOP_UNLOCK(vp);
+ VOP_UNLOCK(vp, 0, p);
pager_cache(object, FALSE);
- VOP_LOCK(vp);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
} else
uncached = TRUE;
return(uncached);
@@ -525,9 +527,9 @@ vnode_pager_io(vnp, mlist, npages, sync, rw)
* read beyond EOF (returns error)
* short read
*/
- VOP_LOCK(vnp->vnp_vp);
+ vn_lock(vnp->vnp_vp, LK_EXCLUSIVE | LK_RETRY, p);
if (foff >= vnp->vnp_size) {
- VOP_UNLOCK(vnp->vnp_vp);
+ VOP_UNLOCK(vnp->vnp_vp, 0, p);
vm_pager_unmap_pages(kva, npages);
#ifdef DEBUG
if (vpagerdebug & VDB_SIZE)
@@ -558,7 +560,7 @@ vnode_pager_io(vnp, mlist, npages, sync, rw)
error = VOP_READ(vnp->vnp_vp, &auio, 0, p->p_ucred);
else
error = VOP_WRITE(vnp->vnp_vp, &auio, 0, p->p_ucred);
- VOP_UNLOCK(vnp->vnp_vp);
+ VOP_UNLOCK(vnp->vnp_vp, 0, p);
#ifdef DEBUG
if (vpagerdebug & VDB_IO) {
if (error || auio.uio_resid)