Diffstat (limited to 'sys/vm/vm_glue.c')
-rw-r--r--  sys/vm/vm_glue.c | 500
1 file changed, 290 insertions(+), 210 deletions(-)
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 5676ff3..f181ab0 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -67,16 +67,22 @@
#include <sys/buf.h>
#include <sys/user.h>
+#include <sys/kernel.h>
+#include <sys/dkstat.h>
+
#include <vm/vm.h>
#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
-#include <machine/cpu.h>
+#include <machine/stdarg.h>
+extern char kstack[];
int avefree = 0; /* XXX */
-unsigned maxdmap = MAXDSIZ; /* XXX */
int readbuffers = 0; /* XXX allow kgdb to read kernel buffer pool */
+/* vm_map_t upages_map; */
+void swapout(struct proc *p);
int
kernacc(addr, len, rw)
caddr_t addr;
@@ -89,18 +95,6 @@ kernacc(addr, len, rw)
saddr = trunc_page(addr);
eaddr = round_page(addr+len);
rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
- /*
- * XXX there are still some things (e.g. the buffer cache) that
- * are managed behind the VM system's back so even though an
- * address is accessible in the mind of the VM system, there may
- * not be physical pages where the VM thinks there is. This can
- * lead to bogus allocation of pages in the kernel address space
- * or worse, inconsistencies at the pmap level. We only worry
- * about the buffer cache for now.
- */
- if (!readbuffers && rv && (eaddr > (vm_offset_t)buffers &&
- saddr < (vm_offset_t)buffers + MAXBSIZE * nbuf))
- rv = FALSE;
return(rv == TRUE);
}
@@ -112,6 +106,23 @@ useracc(addr, len, rw)
boolean_t rv;
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
+ /*
+ * XXX - specially disallow access to user page tables - they are
+ * in the map.
+ *
+ * XXX - don't specially disallow access to the user area - treat
+ * it as incorrectly as elsewhere.
+ *
+ * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. It was
+ * only used (as an end address) in trap.c. Use it as an end
+ * address here too.
+ */
+ if ((vm_offset_t) addr >= VM_MAXUSER_ADDRESS
+ || (vm_offset_t) addr + len > VM_MAXUSER_ADDRESS
+ || (vm_offset_t) addr + len <= (vm_offset_t) addr) {
+ return (FALSE);
+ }
+
rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
trunc_page(addr), round_page(addr+len), prot);
return(rv == TRUE);
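[The added guard rejects a range that starts at or beyond VM_MAXUSER_ADDRESS, runs past it, or wraps around the top of the address space; the third test also rejects len == 0. A minimal standalone sketch of the same pattern, with USER_END standing in for the machine-dependent VM_MAXUSER_ADDRESS:]

    /*
     * Overflow-safe user range check, in isolation.  USER_END is an
     * assumed placeholder value; the logic mirrors the hunk above.
     */
    #include <stdio.h>

    #define USER_END 0xefbfe000UL   /* hypothetical end of user space */

    static int
    user_range_ok(unsigned long addr, unsigned long len)
    {
        if (addr >= USER_END ||       /* starts at or past the end */
            addr + len > USER_END ||  /* runs past the end */
            addr + len <= addr)       /* wrapped around (or len == 0) */
            return (0);
        return (1);
    }

    int
    main(void)
    {
        printf("%d\n", user_range_ok(0x1000UL, 0x1000UL));          /* 1 */
        printf("%d\n", user_range_ok(0x1000UL, (unsigned long)-1)); /* 0: only the wrap test catches it */
        return (0);
    }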
@@ -121,40 +132,18 @@ useracc(addr, len, rw)
/*
* Change protections on kernel pages from addr to addr+len
* (presumably so debugger can plant a breakpoint).
- *
- * We force the protection change at the pmap level. If we were
- * to use vm_map_protect a change to allow writing would be lazily-
- * applied meaning we would still take a protection fault, something
- * we really don't want to do. It would also fragment the kernel
- * map unnecessarily. We cannot use pmap_protect since it also won't
- * enforce a write-enable request. Using pmap_enter is the only way
- * we can ensure the change takes place properly.
+ * All addresses are assumed to reside in the Sysmap.
*/
-void
chgkprot(addr, len, rw)
register caddr_t addr;
int len, rw;
{
- vm_prot_t prot;
- vm_offset_t pa, sva, eva;
-
- prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
- eva = round_page(addr + len);
- for (sva = trunc_page(addr); sva < eva; sva += PAGE_SIZE) {
- /*
- * Extract physical address for the page.
- * We use a cheezy hack to differentiate physical
- * page 0 from an invalid mapping, not that it
- * really matters...
- */
- pa = pmap_extract(kernel_pmap, sva|1);
- if (pa == 0)
- panic("chgkprot: invalid page");
- pmap_enter(kernel_pmap, sva, pa&~1, prot, TRUE);
- }
+ vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
+
+ vm_map_protect(kernel_map, trunc_page(addr),
+ round_page(addr+len), prot, FALSE);
}
#endif
-
void
vslock(addr, len)
caddr_t addr;
@@ -172,8 +161,8 @@ vsunlock(addr, len, dirtied)
{
#ifdef lint
dirtied++;
-#endif
- vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
+#endif lint
+ vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
round_page(addr+len), TRUE);
}
@@ -194,16 +183,19 @@ vm_fork(p1, p2, isvfork)
int isvfork;
{
register struct user *up;
- vm_offset_t addr;
+ vm_offset_t addr, ptaddr;
+ int i;
+ struct vm_map *vp;
+
+ while( cnt.v_free_count < cnt.v_free_min)
+ VM_WAIT;
-#ifdef i386
/*
* avoid copying any of the parent's pagetables or other per-process
* objects that reside in the map by marking all of them non-inheritable
*/
(void)vm_map_inherit(&p1->p_vmspace->vm_map,
- UPT_MIN_ADDRESS-UPAGES*NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
-#endif
+ UPT_MIN_ADDRESS - UPAGES * NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
p2->p_vmspace = vmspace_fork(p1->p_vmspace);
#ifdef SYSVSHM
@@ -211,23 +203,40 @@ vm_fork(p1, p2, isvfork)
shmfork(p1, p2, isvfork);
#endif
-#ifndef i386
/*
* Allocate a wired-down (for now) pcb and kernel stack for the process
*/
- addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
- if (addr == 0)
- panic("vm_fork: no more kernel virtual memory");
- vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
-#else
-/* XXX somehow, on 386, ocassionally pageout removes active, wired down kstack,
-and pagetables, WITHOUT going thru vm_page_unwire! Why this appears to work is
-not yet clear, yet it does... */
- addr = kmem_alloc(kernel_map, ctob(UPAGES));
- if (addr == 0)
- panic("vm_fork: no more kernel virtual memory");
-#endif
- up = (struct user *)addr;
+
+ addr = (vm_offset_t) kstack;
+
+ vp = &p2->p_vmspace->vm_map;
+
+ /* ream out old pagetables and kernel stack */
+ (void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
+
+ /* get new pagetables and kernel stack */
+ (void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
+
+ /* force in the page table encompassing the UPAGES */
+ ptaddr = trunc_page((u_int)vtopte(addr));
+ vm_map_pageable(vp, ptaddr, ptaddr + NBPG, FALSE);
+
+ /* and force in (demand-zero) the UPAGES */
+ vm_map_pageable(vp, addr, addr + UPAGES * NBPG, FALSE);
+
+ /* get a kernel virtual address for the UPAGES for this proc */
+ up = (struct user *)kmem_alloc_pageable(kernel_map, UPAGES * NBPG);
+
+ /* and force-map the upages into the kernel pmap */
+ for (i = 0; i < UPAGES; i++)
+ pmap_enter(vm_map_pmap(kernel_map),
+ ((vm_offset_t) up) + NBPG * i,
+ pmap_extract(vp->pmap, addr + NBPG * i),
+ VM_PROT_READ|VM_PROT_WRITE, 1);
+
+ /* and allow the UPAGES page table entry to be paged (at the vm system level) */
+ vm_map_pageable(vp, ptaddr, ptaddr + NBPG, TRUE);
+
p2->p_addr = up;
/*
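[vm_fork() wires the page-table page returned by vtopte() before wiring the UPAGES, since faulting the stack pages in requires their PTEs to be resident. A runnable sketch of the address arithmetic, using assumed i386-era constants (4 KB pages, 4-byte PTEs, a recursively mapped page table at PTMAP_BASE); all values here are illustrative, not taken from this file:]

    #include <stdio.h>

    #define NBPG        4096UL         /* page size */
    #define PTE_SIZE    4UL            /* bytes per PTE */
    #define PTMAP_BASE  0xFDC00000UL   /* assumed recursive-map base */

    /* rough shape of an i386-era vtopte(): one PTE per page of va */
    static unsigned long
    vtopte_demo(unsigned long va)
    {
        return (PTMAP_BASE + (va / NBPG) * PTE_SIZE);
    }

    int
    main(void)
    {
        unsigned long kstack_va = 0xFDBFE000UL;  /* assumed UPAGES address */
        unsigned long pte = vtopte_demo(kstack_va);

        /* the page of PTEs that vm_fork() wires while touching the UPAGES */
        printf("pte at %#lx, pt page at %#lx\n", pte, pte & ~(NBPG - 1));
        return (0);
    }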
@@ -246,15 +255,7 @@ not yet clear, yet it does... */
((caddr_t)&up->u_stats.pstat_endcopy -
(caddr_t)&up->u_stats.pstat_startcopy));
-#ifdef i386
- { u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG; struct vm_map *vp;
-
- vp = &p2->p_vmspace->vm_map;
- (void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
- (void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
- (void)vm_map_inherit(vp, addr, UPT_MAX_ADDRESS, VM_INHERIT_NONE);
- }
-#endif
+
/*
* cpu_fork will copy and update the kernel stack and pcb,
* and make the child ready to run. It marks the child
@@ -273,6 +274,7 @@ void
vm_init_limits(p)
register struct proc *p;
{
+ int tmp;
/*
* Set up the initial limits on process VM.
@@ -285,11 +287,13 @@ vm_init_limits(p)
p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
- p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(cnt.v_free_count);
+ tmp = ((2 * cnt.v_free_count) / 3) - 32;
+ if (cnt.v_free_count < 512)
+ tmp = cnt.v_free_count;
+ p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(tmp);
+ p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
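[The new default soft RSS limit is two thirds of free memory minus 32 pages, except on machines with fewer than 512 free pages, where it is simply everything free; the hard limit becomes infinite. A worked example, assuming 4 KB pages:]

    #include <stdio.h>

    static long
    default_rss_pages(long v_free_count)
    {
        long tmp = ((2 * v_free_count) / 3) - 32;

        if (v_free_count < 512)
            tmp = v_free_count;
        return (tmp);
    }

    int
    main(void)
    {
        /* 3000 free pages (~12 MB): limit is 1968 pages (~7.7 MB) */
        printf("%ld\n", default_rss_pages(3000));
        /* 400 free pages: below the 512-page cutoff, limit is all 400 */
        printf("%ld\n", default_rss_pages(400));
        return (0);
    }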
-#include <vm/vm_pageout.h>
-
#ifdef DEBUG
int enableswap = 1;
int swapdebug = 0;
@@ -298,12 +302,67 @@ int swapdebug = 0;
#define SDB_SWAPOUT 4
#endif
+void
+faultin(p)
+struct proc *p;
+{
+ vm_offset_t i;
+ vm_offset_t vaddr, ptaddr;
+ vm_offset_t v, v1;
+ struct user *up;
+ int s;
+ int opflag;
+
+ if ((p->p_flag & P_INMEM) == 0) {
+ int rv0, rv1;
+ vm_map_t map;
+
+ ++p->p_lock;
+
+ map = &p->p_vmspace->vm_map;
+ /* force the page table encompassing the kernel stack (upages) */
+ ptaddr = trunc_page((u_int)vtopte(kstack));
+ vm_map_pageable(map, ptaddr, ptaddr + NBPG, FALSE);
+
+ /* wire in the UPAGES */
+ vm_map_pageable(map, (vm_offset_t) kstack,
+ (vm_offset_t) kstack + UPAGES * NBPG, FALSE);
+
+ /* and map them nicely into the kernel pmap */
+ for (i = 0; i < UPAGES; i++) {
+ vm_offset_t off = i * NBPG;
+ vm_offset_t pa = (vm_offset_t)
+ pmap_extract(&p->p_vmspace->vm_pmap,
+ (vm_offset_t) kstack + off);
+ pmap_enter(vm_map_pmap(kernel_map),
+ ((vm_offset_t)p->p_addr) + off,
+ pa, VM_PROT_READ|VM_PROT_WRITE, 1);
+ }
+
+ /* and let the page table pages go (at least above pmap level) */
+ vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);
+
+ s = splhigh();
+
+ if (p->p_stat == SRUN)
+ setrunqueue(p);
+
+ p->p_flag |= P_INMEM;
+
+ /* release the hold we took on p_lock above */
+ --p->p_lock;
+ splx(s);
+
+ }
+
+}
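[p_lock now acts as a hold count: faultin() raises it while wiring the u-area back in, and the swappable() macro below refuses to pick a process whose count is nonzero. A hedged sketch of how a caller might pin a process resident; the PHOLD_SKETCH/PRELE_SKETCH names are illustrative, not from this file:]

    /*
     * Illustrative hold/release pair: bump p_lock so swapout() skips
     * the process, and fault the u-area back in if it was swapped.
     */
    #define PHOLD_SKETCH(p) do {                                        \
        if ((p)->p_lock++ == 0 && ((p)->p_flag & P_INMEM) == 0)         \
            faultin(p);                                                 \
    } while (0)

    #define PRELE_SKETCH(p) (--(p)->p_lock)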
+
+int swapinreq;
+int percentactive;
/*
- * Brutally simple:
- * 1. Attempt to swapin every swaped-out, runnable process in
- * order of priority.
- * 2. If not enough memory, wake the pageout daemon and let it
- * clear some space.
+ * This swap-in algorithm attempts to swap in processes only if there
+ * is enough space for them. Of course, if a process waits for a long
+ * time, it will be swapped in anyway.
*/
void
scheduler()
@@ -313,88 +372,104 @@ scheduler()
struct proc *pp;
int ppri;
vm_offset_t addr;
- vm_size_t size;
+ int lastidle, lastrun;
+ int curidle, currun;
+ int forceload;
+ int percent;
+ int ntries;
+
+ lastidle = 0;
+ lastrun = 0;
loop:
-#ifdef DEBUG
- while (!enableswap)
- sleep((caddr_t)&proc0, PVM);
-#endif
+ ntries = 0;
+ vmmeter();
+
+ curidle = cp_time[CP_IDLE];
+ currun = cp_time[CP_USER] + cp_time[CP_SYS] + cp_time[CP_NICE];
+ percent = (100*(currun-lastrun)) / ( 1 + (currun-lastrun) + (curidle-lastidle));
+ lastrun = currun;
+ lastidle = curidle;
+ if( percent > 100)
+ percent = 100;
+ percentactive = percent;
+
+ if( percentactive < 25)
+ forceload = 1;
+ else
+ forceload = 0;
+
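[The activity estimate compares run ticks (user + sys + nice) against idle ticks accumulated since the previous pass; the +1 in the denominator avoids division by zero when no ticks elapsed. When the machine is less than 25% active, forceload permits swap-ins even when memory is tight. A worked example:]

    #include <stdio.h>

    int
    main(void)
    {
        int run_delta = 90, idle_delta = 10;  /* ticks since last pass */
        int percent = (100 * run_delta) / (1 + run_delta + idle_delta);

        printf("%d%% active\n", percent);     /* 9000/101 = 89% active */
        if (percent < 25)
            printf("forceload: swap-ins allowed even when memory is tight\n");
        return (0);
    }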
+loop1:
pp = NULL;
ppri = INT_MIN;
for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
+ int mempri;
pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
- if (pri > ppri) {
+ mempri = pri > 0 ? pri : 0;
+ /*
+ * if this process is higher priority and there is
+ * enough space, then select this process instead
+ * of the previous selection.
+ */
+ if (pri > ppri &&
+ (((cnt.v_free_count + (mempri * (4*PAGE_SIZE) / PAGE_SIZE) >= (p->p_vmspace->vm_swrss)) || (ntries > 0 && forceload)))) {
pp = p;
ppri = pri;
}
}
}
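[The selection test admits a process when free memory plus a priority-scaled allowance covers vm_swrss, the resident-set size recorded at swap-out time; since mempri * (4*PAGE_SIZE) is an exact multiple of PAGE_SIZE, the expression reduces to four extra pages per priority point. The test in isolation, runnable:]

    #include <stdio.h>

    static int
    enough_space(int v_free_count, int mempri, int vm_swrss)
    {
        /* free pages plus 4 pages of headroom per priority point */
        return (v_free_count + mempri * 4 >= vm_swrss);
    }

    int
    main(void)
    {
        /* 100 pages free; priority 20 grants 80 extra: a 150-page proc fits */
        printf("%d\n", enough_space(100, 20, 150));  /* 1 */
        /* same free memory, low priority: the 150-page proc must wait */
        printf("%d\n", enough_space(100, 2, 150));   /* 0 */
        return (0);
    }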
-#ifdef DEBUG
- if (swapdebug & SDB_FOLLOW)
- printf("sched: running, procp %x pri %d\n", pp, ppri);
-#endif
+
+ if ((pp == NULL) && (ntries == 0) && forceload) {
+ ++ntries;
+ goto loop1;
+ }
+
/*
* Nothing to do, back to sleep
*/
if ((p = pp) == NULL) {
- sleep((caddr_t)&proc0, PVM);
+ tsleep((caddr_t)&proc0, PVM, "sched", 0);
goto loop;
}
/*
- * We would like to bring someone in.
- * This part is really bogus cuz we could deadlock on memory
- * despite our feeble check.
+ * We would like to bring someone in. (only if there is space).
*/
- size = round_page(ctob(UPAGES));
- addr = (vm_offset_t) p->p_addr;
- if (cnt.v_free_count > atop(size)) {
-#ifdef DEBUG
- if (swapdebug & SDB_SWAPIN)
- printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
- p->p_pid, p->p_comm, p->p_addr,
- ppri, cnt.v_free_count);
-#endif
- vm_map_pageable(kernel_map, addr, addr+size, FALSE);
- /*
- * Some architectures need to be notified when the
- * user area has moved to new physical page(s) (e.g.
- * see pmax/pmax/vm_machdep.c).
- */
- cpu_swapin(p);
- (void) splstatclock();
- if (p->p_stat == SRUN)
- setrunqueue(p);
- p->p_flag |= P_INMEM;
- (void) spl0();
+/*
+ printf("swapin: %d, free: %d, res: %d, min: %d\n",
+ p->p_pid, cnt.v_free_count, cnt.v_free_reserved, cnt.v_free_min);
+*/
+ (void) splhigh();
+ if ((forceload && (cnt.v_free_count > (cnt.v_free_reserved + UPAGES + 1))) ||
+ (cnt.v_free_count >= cnt.v_free_min)) {
+ spl0();
+ faultin(p);
p->p_swtime = 0;
goto loop;
- }
+ }
+ /*
+ * log the memory shortage
+ */
+ swapinreq += p->p_vmspace->vm_swrss;
/*
* Not enough memory, jab the pageout daemon and wait til the
* coast is clear.
*/
-#ifdef DEBUG
- if (swapdebug & SDB_FOLLOW)
- printf("sched: no room for pid %d(%s), free %d\n",
- p->p_pid, p->p_comm, cnt.v_free_count);
-#endif
- (void) splhigh();
- VM_WAIT;
+ if( cnt.v_free_count < cnt.v_free_min) {
+ VM_WAIT;
+ } else {
+ tsleep((caddr_t)&proc0, PVM, "sched", 0);
+ }
(void) spl0();
-#ifdef DEBUG
- if (swapdebug & SDB_FOLLOW)
- printf("sched: room again, free %d\n", cnt.v_free_count);
-#endif
goto loop;
}
-#define swappable(p) \
- (((p)->p_flag & \
- (P_SYSTEM | P_INMEM | P_NOSWAP | P_WEXIT | P_PHYSIO)) == P_INMEM)
+#define swappable(p) \
+ (((p)->p_lock == 0) && \
+ ((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO)) == P_INMEM)
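[The rewritten macro adds the p_lock hold-count test and the P_TRACED flag: a process is swappable only when it is unheld and, among the listed flags, has exactly P_INMEM set. A small demonstration with assumed flag values; the real bit assignments live in sys/proc.h:]

    #include <stdio.h>

    #define P_INMEM  0x00000004   /* illustrative values only */
    #define P_SYSTEM 0x00000200
    #define P_TRACED 0x00000800

    #define SWAPPABLE(lock, flag) \
        ((lock) == 0 && ((flag) & (P_TRACED|P_SYSTEM|P_INMEM)) == P_INMEM)

    int
    main(void)
    {
        printf("%d\n", SWAPPABLE(0, P_INMEM));           /* 1 */
        printf("%d\n", SWAPPABLE(1, P_INMEM));           /* 0: held */
        printf("%d\n", SWAPPABLE(0, P_INMEM|P_SYSTEM));  /* 0: kernel proc */
        return (0);
    }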
+extern int vm_pageout_free_min;
/*
* Swapout is driven by the pageout daemon. Very simple, we find eligible
* procs and unwire their u-areas. We try to always "swap" at least one
@@ -409,54 +484,86 @@ swapout_threads()
register struct proc *p;
struct proc *outp, *outp2;
int outpri, outpri2;
+ int tpri;
int didswap = 0;
+ int swapneeded = swapinreq;
extern int maxslp;
+ int runnablenow;
+ int s;
-#ifdef DEBUG
- if (!enableswap)
- return;
-#endif
+swapmore:
+ runnablenow = 0;
outp = outp2 = NULL;
- outpri = outpri2 = 0;
+ outpri = outpri2 = INT_MIN;
for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
if (!swappable(p))
continue;
switch (p->p_stat) {
case SRUN:
- if (p->p_swtime > outpri2) {
+ ++runnablenow;
+ /*
+ * count the process as being in a runnable state
+ */
+ if ((tpri = p->p_swtime + p->p_nice * 8) > outpri2) {
outp2 = p;
- outpri2 = p->p_swtime;
+ outpri2 = tpri;
}
continue;
case SSLEEP:
case SSTOP:
- if (p->p_slptime >= maxslp) {
+ /*
+ * do not swap out a process that is waiting for VM data structures;
+ * there is a possible deadlock.
+ */
+ if (!lock_try_write( &p->p_vmspace->vm_map.lock)) {
+ continue;
+ }
+ vm_map_unlock( &p->p_vmspace->vm_map);
+ if (p->p_slptime > maxslp) {
swapout(p);
didswap++;
- } else if (p->p_slptime > outpri) {
+ } else if ((tpri = p->p_slptime + p->p_nice * 8) > outpri) {
outp = p;
- outpri = p->p_slptime;
+ outpri = tpri ;
}
continue;
}
}
/*
- * If we didn't get rid of any real duds, toss out the next most
- * likely sleeping/stopped or running candidate. We only do this
- * if we are real low on memory since we don't gain much by doing
- * it (UPAGES pages).
+ * We swap out only if there are more than two runnable processes or if
+ * another process needs some space to swap in.
*/
- if (didswap == 0 &&
- cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
- if ((p = outp) == 0)
- p = outp2;
-#ifdef DEBUG
- if (swapdebug & SDB_SWAPOUT)
- printf("swapout_threads: no duds, try procp %x\n", p);
-#endif
- if (p)
+ if ((swapinreq || ((percentactive > 90) && (runnablenow > 2))) &&
+ (((cnt.v_free_count + cnt.v_inactive_count) <= (cnt.v_free_target + cnt.v_inactive_target)) ||
+ (cnt.v_free_count < cnt.v_free_min))) {
+ if ((p = outp) == 0) {
+ p = outp2;
+ }
+
+ if (p) {
swapout(p);
+ didswap = 1;
+ }
+ }
+
+ /*
+ * if we previously found a process to swap out and we need to swap out
+ * more, then try again.
+ */
+#if 0
+ if( p && swapinreq)
+ goto swapmore;
+#endif
+
+ /*
+ * If we swapped something out, and another process needed memory,
+ * then wakeup the sched process.
+ */
+ if (didswap) {
+ if (swapneeded)
+ wakeup((caddr_t)&proc0);
+ swapinreq = 0;
}
}
@@ -465,59 +572,37 @@ swapout(p)
register struct proc *p;
{
vm_offset_t addr;
- vm_size_t size;
+ struct pmap *pmap = &p->p_vmspace->vm_pmap;
+ vm_map_t map = &p->p_vmspace->vm_map;
+ vm_offset_t ptaddr;
+ int i;
-#ifdef DEBUG
- if (swapdebug & SDB_SWAPOUT)
- printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
- p->p_pid, p->p_comm, p->p_addr, p->p_stat,
- p->p_slptime, cnt.v_free_count);
-#endif
- size = round_page(ctob(UPAGES));
- addr = (vm_offset_t) p->p_addr;
-#if defined(hp300) || defined(luna68k)
+ ++p->p_stats->p_ru.ru_nswap;
/*
- * Ugh! u-area is double mapped to a fixed address behind the
- * back of the VM system and accesses are usually through that
- * address rather than the per-process address. Hence reference
- * and modify information are recorded at the fixed address and
- * lost at context switch time. We assume the u-struct and
- * kernel stack are always accessed/modified and force it to be so.
+ * remember the process resident count
*/
- {
- register int i;
- volatile long tmp;
-
- for (i = 0; i < UPAGES; i++) {
- tmp = *(long *)addr; *(long *)addr = tmp;
- addr += NBPG;
- }
- addr = (vm_offset_t) p->p_addr;
- }
-#endif
-#ifdef mips
+ p->p_vmspace->vm_swrss =
+ p->p_vmspace->vm_pmap.pm_stats.resident_count;
/*
- * Be sure to save the floating point coprocessor state before
- * paging out the u-struct.
+ * and decrement the amount of needed space
*/
- {
- extern struct proc *machFPCurProcPtr;
+ swapinreq -= min(swapinreq, p->p_vmspace->vm_pmap.pm_stats.resident_count);
- if (p == machFPCurProcPtr) {
- MachSaveCurFPState(p);
- machFPCurProcPtr = (struct proc *)0;
- }
- }
-#endif
-#ifndef i386 /* temporary measure till we find spontaineous unwire of kstack */
- vm_map_pageable(kernel_map, addr, addr+size, TRUE);
- pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
-#endif
(void) splhigh();
p->p_flag &= ~P_INMEM;
if (p->p_stat == SRUN)
remrq(p);
(void) spl0();
+
+ ++p->p_lock;
+/* let the upages be paged */
+ pmap_remove(vm_map_pmap(kernel_map),
+ (vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * NBPG);
+
+ vm_map_pageable(map, (vm_offset_t) kstack,
+ (vm_offset_t) kstack + UPAGES * NBPG, TRUE);
+
+ --p->p_lock;
p->p_swtime = 0;
}
@@ -525,6 +610,7 @@ swapout(p)
* The rest of these routines fake thread handling
*/
+#ifndef assert_wait
void
assert_wait(event, ruptible)
int event;
@@ -535,44 +621,38 @@ assert_wait(event, ruptible)
#endif
curproc->p_thread = event;
}
+#endif
void
-thread_block()
+thread_block(char *msg)
{
- int s = splhigh();
-
if (curproc->p_thread)
- sleep((caddr_t)curproc->p_thread, PVM);
- splx(s);
+ tsleep((caddr_t)curproc->p_thread, PVM, msg, 0);
}
+
void
-thread_sleep(event, lock, ruptible)
+thread_sleep_(event, lock, wmesg)
int event;
simple_lock_t lock;
- boolean_t ruptible;
+ char *wmesg;
{
-#ifdef lint
- ruptible++;
-#endif
- int s = splhigh();
curproc->p_thread = event;
simple_unlock(lock);
- if (curproc->p_thread)
- sleep((caddr_t)event, PVM);
- splx(s);
+ if (curproc->p_thread) {
+ tsleep((caddr_t)event, PVM, wmesg, 0);
+ }
}
+#ifndef thread_wakeup
void
thread_wakeup(event)
int event;
{
- int s = splhigh();
-
wakeup((caddr_t)event);
- splx(s);
}
+#endif
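[The shims now sleep via tsleep() rather than sleep(); tsleep() takes a wait channel, a priority, a wait-message string that utilities such as ps(1) display, and a timeout in ticks (0 means sleep until wakeup()). A hedged usage sketch; wait_for_event_sketch is hypothetical:]

    static int
    wait_for_event_sketch(void *chan)
    {
        int error;

        error = tsleep((caddr_t)chan, PVM, "vmwait", 0);
        return (error);   /* 0 on wakeup; EWOULDBLOCK on timeout */
    }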
/*
* DEBUG stuff