Diffstat (limited to 'sys/vm')
 sys/vm/pmap.h        |   7
 sys/vm/vm_contig.c   |   8
 sys/vm/vm_extern.h   |  26
 sys/vm/vm_fault.c    |   2
 sys/vm/vm_glue.c     | 155
 sys/vm/vm_map.c      |  28
 sys/vm/vm_map.h      |   2
 sys/vm/vm_meter.c    |  45
 sys/vm/vm_mmap.c     | 112
 sys/vm/vm_object.c   |   2
 sys/vm/vm_pageout.c  |  21
 sys/vm/vm_swap.c     |  32
 sys/vm/vm_unix.c     |  12
 sys/vm/vm_zeroidle.c |   8
 sys/vm/vnode_pager.c |  16
 15 files changed, 263 insertions(+), 213 deletions(-)
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 4ff3321..89432aa 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -89,6 +89,7 @@ typedef struct pmap_statistics *pmap_statistics_t;
#ifdef _KERNEL
struct proc;
+struct thread;
#ifdef __alpha__
void pmap_page_is_free __P((vm_page_t m));
@@ -138,7 +139,11 @@ void pmap_new_proc __P((struct proc *p));
void pmap_dispose_proc __P((struct proc *p));
void pmap_swapout_proc __P((struct proc *p));
void pmap_swapin_proc __P((struct proc *p));
-void pmap_activate __P((struct proc *p));
+void pmap_new_thread __P((struct thread *td));
+void pmap_dispose_thread __P((struct thread *td));
+void pmap_swapout_thread __P((struct thread *td));
+void pmap_swapin_thread __P((struct thread *td));
+void pmap_activate __P((struct thread *td));
vm_offset_t pmap_addr_hint __P((vm_object_t obj, vm_offset_t addr, vm_size_t size));
void *pmap_kenter_temporary __P((vm_offset_t pa, int i));
void pmap_init2 __P((void));
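The prototypes above (and throughout this patch) use the old BSD __P() macro from <sys/cdefs.h>, which lets one header serve both ANSI and pre-ANSI compilers. A minimal stand-alone sketch of the idea (the real definition lives in <sys/cdefs.h>):

    #if defined(__STDC__) || defined(__cplusplus)
    #define __P(protos)     protos          /* ANSI C: keep the parameter list */
    #else
    #define __P(protos)     ()              /* traditional K&R C: drop it */
    #endif

    struct thread;                          /* opaque forward declaration */
    void    pmap_activate __P((struct thread *td));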
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 50534db..b515e85 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -152,9 +152,9 @@ again1:
vm_page_test_dirty(m);
if (m->dirty) {
if (m->object->type == OBJT_VNODE) {
- vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
+ vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
- VOP_UNLOCK(m->object->handle, 0, curproc);
+ VOP_UNLOCK(m->object->handle, 0, curthread);
goto again1;
} else if (m->object->type == OBJT_SWAP ||
m->object->type == OBJT_DEFAULT) {
@@ -179,9 +179,9 @@ again1:
vm_page_test_dirty(m);
if (m->dirty) {
if (m->object->type == OBJT_VNODE) {
- vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
+ vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
- VOP_UNLOCK(m->object->handle, 0, curproc);
+ VOP_UNLOCK(m->object->handle, 0, curthread);
goto again1;
} else if (m->object->type == OBJT_SWAP ||
m->object->type == OBJT_DEFAULT) {
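Both hunks above preserve the same retry idiom: flushing a dirty vnode-backed page can sleep, so after the synchronous clean the scan restarts from again1. A stand-alone model of that pattern, where vnode_lock()/vnode_unlock()/object_page_clean() are illustrative stand-ins for vn_lock()/VOP_UNLOCK()/vm_object_page_clean(), not kernel APIs:

    struct vnode;
    int  page_is_dirty(void);
    void vnode_lock(struct vnode *);
    void vnode_unlock(struct vnode *);
    void object_page_clean(struct vnode *);

    void
    flush_until_clean(struct vnode *vp)
    {
    again:
            if (page_is_dirty()) {
                    vnode_lock(vp);         /* may sleep; owner is a thread */
                    object_page_clean(vp);  /* synchronously write the pages */
                    vnode_unlock(vp);
                    goto again;             /* rescan: the sleep invalidated it */
            }
    }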
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 9ff0191..79bd5d8 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -47,16 +47,16 @@ struct vnode;
#ifdef _KERNEL
#ifdef TYPEDEF_FOR_UAP
-int getpagesize __P((struct proc * p, void *, int *));
-int madvise __P((struct proc *, void *, int *));
-int mincore __P((struct proc *, void *, int *));
-int mprotect __P((struct proc *, void *, int *));
-int msync __P((struct proc *, void *, int *));
-int munmap __P((struct proc *, void *, int *));
-int obreak __P((struct proc *, void *, int *));
-int sbrk __P((struct proc *, void *, int *));
-int sstk __P((struct proc *, void *, int *));
-int swapon __P((struct proc *, void *, int *));
+int getpagesize __P((struct thread *, void *, int *));
+int madvise __P((struct thread *, void *, int *));
+int mincore __P((struct thread *, void *, int *));
+int mprotect __P((struct thread *, void *, int *));
+int msync __P((struct thread *, void *, int *));
+int munmap __P((struct thread *, void *, int *));
+int obreak __P((struct thread *, void *, int *));
+int sbrk __P((struct thread *, void *, int *));
+int sstk __P((struct thread *, void *, int *));
+int swapon __P((struct thread *, void *, int *));
#endif
int grow __P((struct proc *, size_t));
@@ -71,8 +71,8 @@ void kmem_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_init __P((vm_offset_t, vm_offset_t));
vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t));
-void munmapfd __P((struct proc *, int));
-int swaponvp __P((struct proc *, struct vnode *, dev_t , u_long));
+void munmapfd __P((struct thread *, int));
+int swaponvp __P((struct thread *, struct vnode *, dev_t , u_long));
void swapout_procs __P((int));
int useracc __P((caddr_t, int, int));
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, int));
@@ -80,7 +80,7 @@ void vm_fault_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_user_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
-void vm_forkproc __P((struct proc *, struct proc *, int));
+void vm_forkproc __P((struct thread *, struct proc *, int));
void vm_waitproc __P((struct proc *));
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t));
vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index c098088..3507353 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -666,7 +666,7 @@ readrest:
* grab the lock if we need to
*/
(fs.lookup_still_valid ||
- lockmgr(&fs.map->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curproc) == 0)
+ lockmgr(&fs.map->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curthread) == 0)
) {
fs.lookup_still_valid = 1;
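The LK_NOWAIT acquisition above re-validates the map lookup without ever sleeping inside the fault handler. A userland sketch of the same trylock-or-bail shape, with pthread_mutex_trylock() standing in for lockmgr() (both return 0 on success):

    #include <pthread.h>

    int
    lookup_revalidate(pthread_mutex_t *map_lock, int lookup_still_valid)
    {
            /* Take the lock only if it is immediately available. */
            if (lookup_still_valid || pthread_mutex_trylock(map_lock) == 0)
                    return 1;               /* lookup is (again) valid */
            return 0;                       /* would have had to sleep */
    }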
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 5638175..d239516 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -209,10 +209,12 @@ vsunlock(addr, len)
* to user mode to avoid stack copying and relocation problems.
*/
void
-vm_forkproc(p1, p2, flags)
- struct proc *p1, *p2;
+vm_forkproc(td, p2, flags)
+ struct thread *td;
+ struct proc *p2;
int flags;
{
+ struct proc *p1 = td->td_proc;
struct user *up;
GIANT_REQUIRED;
@@ -228,7 +230,7 @@ vm_forkproc(p1, p2, flags)
vmspace_unshare(p1);
}
}
- cpu_fork(p1, p2, flags);
+ cpu_fork(td, p2, flags);
return;
}
@@ -251,8 +253,10 @@ vm_forkproc(p1, p2, flags)
}
pmap_new_proc(p2);
+ pmap_new_thread(&p2->p_thread); /* Initial thread */
- up = p2->p_addr;
+ /* XXXKSE this is unsatisfactory but should be adequate */
+ up = p2->p_uarea;
/*
* p_stats currently points at fields in the user struct
@@ -282,7 +286,7 @@ vm_forkproc(p1, p2, flags)
* cpu_fork will copy and update the pcb, set up the kernel stack,
* and make the child ready to run.
*/
- cpu_fork(p1, p2, flags);
+ cpu_fork(td, p2, flags);
}
/*
@@ -294,10 +298,13 @@ void
vm_waitproc(p)
struct proc *p;
{
+ struct thread *td;
GIANT_REQUIRED;
cpu_wait(p);
pmap_dispose_proc(p); /* drop per-process resources */
+ FOREACH_THREAD_IN_PROC(p, td)
+ pmap_dispose_thread(td);
vmspace_free(p->p_vmspace); /* and clean-out the vmspace */
}
@@ -338,6 +345,7 @@ void
faultin(p)
struct proc *p;
{
+ struct thread *td;
GIANT_REQUIRED;
PROC_LOCK_ASSERT(p, MA_OWNED);
@@ -348,12 +356,14 @@ faultin(p)
PROC_UNLOCK(p);
pmap_swapin_proc(p);
+ FOREACH_THREAD_IN_PROC (p, td)
+ pmap_swapin_thread(td);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
- if (p->p_stat == SRUN) {
- setrunqueue(p);
- }
+ FOREACH_THREAD_IN_PROC (p, td)
+ if (td->td_proc->p_stat == SRUN) /* XXXKSE */
+ setrunqueue(td);
p->p_sflag |= PS_INMEM;
@@ -368,6 +378,8 @@ faultin(p)
* is enough space for them. Of course, if a process waits for a long
* time, it will be swapped in anyway.
*
+ * XXXKSE - KSEGRP with highest priority counts..
+ *
* Giant is still held at this point, to be released in tsleep.
*/
/* ARGSUSED*/
@@ -392,24 +404,29 @@ loop:
pp = NULL;
ppri = INT_MIN;
sx_slock(&allproc_lock);
- LIST_FOREACH(p, &allproc, p_list) {
+ FOREACH_PROC_IN_SYSTEM(p) {
+ struct ksegrp *kg;
mtx_lock_spin(&sched_lock);
- if (p->p_stat == SRUN &&
- (p->p_sflag & (PS_INMEM | PS_SWAPPING)) == 0) {
-
- pri = p->p_swtime + p->p_slptime;
- if ((p->p_sflag & PS_SWAPINREQ) == 0) {
- pri -= p->p_nice * 8;
- }
-
- /*
- * if this process is higher priority and there is
- * enough space, then select this process instead of
- * the previous selection.
- */
- if (pri > ppri) {
- pp = p;
- ppri = pri;
+ if (p->p_stat == SRUN
+ && (p->p_sflag & (PS_INMEM | PS_SWAPPING)) == 0) {
+ /* Find the minimum sleeptime for the process */
+ FOREACH_KSEGRP_IN_PROC(p, kg) {
+ pri = p->p_swtime + kg->kg_slptime;
+ if ((p->p_sflag & PS_SWAPINREQ) == 0) {
+ pri -= kg->kg_nice * 8;
+ }
+
+
+ /*
+ * if this ksegrp is higher priority
+ * and there is enough space, then select
+ * this process instead of the previous
+ * selection.
+ */
+ if (pri > ppri) {
+ pp = p;
+ ppri = pri;
+ }
}
}
mtx_unlock_spin(&sched_lock);
@@ -469,6 +486,7 @@ swapout_procs(action)
int action;
{
struct proc *p;
+ struct ksegrp *kg;
struct proc *outp, *outp2;
int outpri, outpri2;
int didswap = 0;
@@ -481,6 +499,7 @@ retry:
sx_slock(&allproc_lock);
LIST_FOREACH(p, &allproc, p_list) {
struct vmspace *vm;
+ int minslptime = 100000;
PROC_LOCK(p);
if (p->p_lock != 0 ||
@@ -511,50 +530,59 @@ retry:
case SSTOP:
/*
* do not swapout a realtime process
+ * Check all the thread groups..
*/
- if (PRI_IS_REALTIME(p->p_pri.pri_class)) {
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- continue;
+ FOREACH_KSEGRP_IN_PROC(p, kg) {
+ if (PRI_IS_REALTIME(kg->kg_pri.pri_class)) {
+ mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
+ goto nextproc;
+ }
+
+ /*
+ * Do not swapout a process waiting
+ * on a critical event of some kind.
+ * Also guarantee swap_idle_threshold1
+ * time in memory.
+ */
+ if (((kg->kg_pri.pri_level) < PSOCK) ||
+ (kg->kg_slptime < swap_idle_threshold1)) {
+ mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
+ goto nextproc;
+ }
+
+ /*
+ * If the system is under memory stress,
+ * or if we are swapping
+ * idle processes >= swap_idle_threshold2,
+ * then swap the process out.
+ */
+ if (((action & VM_SWAP_NORMAL) == 0) &&
+ (((action & VM_SWAP_IDLE) == 0) ||
+ (kg->kg_slptime < swap_idle_threshold2))) {
+ mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
+ goto nextproc;
+ }
+ if (minslptime > kg->kg_slptime)
+ minslptime = kg->kg_slptime;
}
- /*
- * Do not swapout a process waiting on a critical
- * event of some kind. Also guarantee swap_idle_threshold1
- * time in memory.
- */
- if (((p->p_pri.pri_level) < PSOCK) ||
- (p->p_slptime < swap_idle_threshold1)) {
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- continue;
- }
-
- /*
- * If the system is under memory stress, or if we are swapping
- * idle processes >= swap_idle_threshold2, then swap the process
- * out.
- */
- if (((action & VM_SWAP_NORMAL) == 0) &&
- (((action & VM_SWAP_IDLE) == 0) ||
- (p->p_slptime < swap_idle_threshold2))) {
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- continue;
- }
mtx_unlock_spin(&sched_lock);
-
++vm->vm_refcnt;
/*
- * do not swapout a process that is waiting for VM
- * data structures there is a possible deadlock.
+ * do not swapout a process that
+ * is waiting for VM
+ * data structures there is a
+ * possible deadlock.
*/
if (lockmgr(&vm->vm_map.lock,
LK_EXCLUSIVE | LK_NOWAIT,
- NULL, curproc)) {
+ NULL, curthread)) {
vmspace_free(vm);
PROC_UNLOCK(p);
- continue;
+ goto nextproc;
}
vm_map_unlock(&vm->vm_map);
/*
@@ -563,7 +591,7 @@ retry:
*/
if ((action & VM_SWAP_NORMAL) ||
((action & VM_SWAP_IDLE) &&
- (p->p_slptime > swap_idle_threshold2))) {
+ (minslptime > swap_idle_threshold2))) {
sx_sunlock(&allproc_lock);
swapout(p);
vmspace_free(vm);
@@ -573,6 +601,7 @@ retry:
PROC_UNLOCK(p);
vmspace_free(vm);
}
+nextproc:
}
sx_sunlock(&allproc_lock);
/*
@@ -587,6 +616,7 @@ static void
swapout(p)
struct proc *p;
{
+ struct thread *td;
PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
@@ -602,11 +632,14 @@ swapout(p)
p->p_sflag &= ~PS_INMEM;
p->p_sflag |= PS_SWAPPING;
PROC_UNLOCK_NOSWITCH(p);
- if (p->p_stat == SRUN)
- remrunqueue(p);
+ FOREACH_THREAD_IN_PROC (p, td)
+ if (td->td_proc->p_stat == SRUN) /* XXXKSE */
+ remrunqueue(td); /* XXXKSE */
mtx_unlock_spin(&sched_lock);
pmap_swapout_proc(p);
+ FOREACH_THREAD_IN_PROC(p, td)
+ pmap_swapout_thread(td);
mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_SWAPPING;
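Most of the churn in vm_glue.c replaces a single per-process operation with a loop over the process's threads via FOREACH_THREAD_IN_PROC(). A minimal stand-alone model of that iteration pattern; the list layout here is illustrative only, since the kernel's macros are built on <sys/queue.h>:

    #include <stddef.h>

    struct thread {
            struct thread *td_next;         /* hypothetical link field */
    };
    struct proc {
            struct thread *p_threads;       /* hypothetical list head */
    };

    #define FOREACH_THREAD_IN_PROC(p, td) \
            for ((td) = (p)->p_threads; (td) != NULL; (td) = (td)->td_next)

    /* e.g. applying a per-thread operation, as the swapout code does: */
    void
    for_each_thread(struct proc *p, void (*fn)(struct thread *))
    {
            struct thread *td;

            FOREACH_THREAD_IN_PROC(p, td)
                    fn(td);
    }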
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 72c0a06..e7a4898 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -266,7 +266,7 @@ void
vm_map_lock(vm_map_t map)
{
vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map);
- if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curproc) != 0)
+ if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread) != 0)
panic("vm_map_lock: failed to get lock");
map->timestamp++;
}
@@ -275,29 +275,29 @@ void
vm_map_unlock(vm_map_t map)
{
vm_map_printf("locking map LK_RELEASE: %p\n", map);
- lockmgr(&(map)->lock, LK_RELEASE, NULL, curproc);
+ lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
}
void
vm_map_lock_read(vm_map_t map)
{
vm_map_printf("locking map LK_SHARED: %p\n", map);
- lockmgr(&(map)->lock, LK_SHARED, NULL, curproc);
+ lockmgr(&(map)->lock, LK_SHARED, NULL, curthread);
}
void
vm_map_unlock_read(vm_map_t map)
{
vm_map_printf("locking map LK_RELEASE: %p\n", map);
- lockmgr(&(map)->lock, LK_RELEASE, NULL, curproc);
+ lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
}
static __inline__ int
-_vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
+_vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
int error;
vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
- error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, p);
+ error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, td);
if (error == 0)
map->timestamp++;
return error;
@@ -306,14 +306,14 @@ _vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
int
vm_map_lock_upgrade(vm_map_t map)
{
- return(_vm_map_lock_upgrade(map, curproc));
+ return(_vm_map_lock_upgrade(map, curthread));
}
void
vm_map_lock_downgrade(vm_map_t map)
{
vm_map_printf("locking map LK_DOWNGRADE: %p\n", map);
- lockmgr(&map->lock, LK_DOWNGRADE, NULL, curproc);
+ lockmgr(&map->lock, LK_DOWNGRADE, NULL, curthread);
}
void
@@ -1863,7 +1863,7 @@ vm_map_clean(
int flags;
vm_object_reference(object);
- vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
+ vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
flags |= invalidate ? OBJPC_INVAL : 0;
vm_object_page_clean(object,
@@ -1877,7 +1877,7 @@ vm_map_clean(
OFF_TO_IDX(offset + size + PAGE_MASK),
FALSE);
}
- VOP_UNLOCK(object->handle, 0, curproc);
+ VOP_UNLOCK(object->handle, 0, curthread);
vm_object_deallocate(object);
}
start += size;
@@ -2619,8 +2619,8 @@ vmspace_exec(struct proc *p)
p->p_vmspace = newvmspace;
pmap_pinit2(vmspace_pmap(newvmspace));
vmspace_free(oldvmspace);
- if (p == curproc)
- pmap_activate(p);
+ if (p == curthread->td_proc) /* XXXKSE ? */
+ pmap_activate(curthread);
}
/*
@@ -2641,8 +2641,8 @@ vmspace_unshare(struct proc *p)
p->p_vmspace = newvmspace;
pmap_pinit2(vmspace_pmap(newvmspace));
vmspace_free(oldvmspace);
- if (p == curproc)
- pmap_activate(p);
+ if (p == curthread->td_proc) /* XXXKSE ? */
+ pmap_activate(curthread);
}
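vm_map_lock() pairs the exclusive acquisition with a timestamp bump, so code that must drop and retake the lock can cheaply detect whether the map changed in between. A userland sketch of the idiom, with a pthread rwlock standing in for lockmgr():

    #include <pthread.h>
    #include <stdlib.h>

    struct map {
            pthread_rwlock_t lock;
            unsigned         timestamp;     /* bumped per exclusive hold */
    };

    void
    map_lock(struct map *m)
    {
            if (pthread_rwlock_wrlock(&m->lock) != 0)
                    abort();                /* mirrors the panic() above */
            m->timestamp++;
    }

    /* A caller snapshots m->timestamp, drops the lock, relocks, and
     * revalidates its cached lookup only if the timestamp moved. */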
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 8839cd8..3776a6e 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -210,7 +210,7 @@ void vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior);
#define vm_map_lock_drain_interlock(map) \
do { \
lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
- &(map)->ref_lock, curproc); \
+ &(map)->ref_lock, curthread); \
(map)->timestamp++; \
} while(0)
#endif
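The do { ... } while(0) wrapper in the macro above is the standard trick for making a multi-statement macro behave as a single statement, so it stays safe under an unbraced if/else:

    #define BUMP_TWO(a, b)  do { (a)++; (b)++; } while (0)

    void
    example(int cond, int *x, int *y)
    {
            if (cond)
                    BUMP_TWO(*x, *y);       /* expands to one statement; a
                                             * bare { ... } block here would
                                             * break the if/else pairing */
            else
                    (*y)--;
    }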
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 20346fa..c3697be 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -73,27 +73,34 @@ static fixpt_t cexp[3] = {
/*
* Compute a tenex style load average of a quantity on
* 1, 5 and 15 minute intervals.
+ * XXXKSE Needs complete rewrite when correct info is available.
+ * Completely Bogus.. only works with 1:1 (but compiles ok now :-)
*/
static void
loadav(struct loadavg *avg)
{
int i, nrun;
struct proc *p;
+ struct ksegrp *kg;
sx_slock(&allproc_lock);
- for (nrun = 0, p = LIST_FIRST(&allproc); p != 0; p = LIST_NEXT(p, p_list)) {
- switch (p->p_stat) {
- case SSLEEP:
- if (p->p_pri.pri_level > PZERO ||
- p->p_slptime != 0)
- continue;
- /* FALLTHROUGH */
- case SRUN:
- if ((p->p_flag & P_NOLOAD) != 0)
- continue;
- /* FALLTHROUGH */
- case SIDL:
- nrun++;
+ nrun = 0;
+ FOREACH_PROC_IN_SYSTEM(p) {
+ FOREACH_KSEGRP_IN_PROC(p, kg) {
+ switch (p->p_stat) {
+ case SSLEEP:
+ if (kg->kg_pri.pri_level > PZERO ||
+ kg->kg_slptime != 0) /* ke? */
+ goto nextproc;
+ /* FALLTHROUGH */
+ case SRUN:
+ if ((p->p_flag & P_NOLOAD) != 0)
+ goto nextproc;
+ /* FALLTHROUGH */
+ case SIDL:
+ nrun++;
+ }
+nextproc:
}
}
sx_sunlock(&allproc_lock);
@@ -139,6 +146,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
vm_object_t object;
vm_map_t map;
int paging;
+ struct ksegrp *kg;
totalp = &total;
bzero(totalp, sizeof *totalp);
@@ -152,7 +160,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
* Calculate process statistics.
*/
sx_slock(&allproc_lock);
- LIST_FOREACH(p, &allproc, p_list) {
+ FOREACH_PROC_IN_SYSTEM(p) {
if (p->p_flag & P_SYSTEM)
continue;
mtx_lock_spin(&sched_lock);
@@ -164,14 +172,15 @@ vmtotal(SYSCTL_HANDLER_ARGS)
case SMTX:
case SSLEEP:
case SSTOP:
+ kg = &p->p_ksegrp; /* XXXKSE */
if (p->p_sflag & PS_INMEM) {
- if (p->p_pri.pri_level <= PZERO)
+ if (kg->kg_pri.pri_level <= PZERO)
totalp->t_dw++;
- else if (p->p_slptime < maxslp)
+ else if (kg->kg_slptime < maxslp)
totalp->t_sl++;
- } else if (p->p_slptime < maxslp)
+ } else if (kg->kg_slptime < maxslp)
totalp->t_sw++;
- if (p->p_slptime >= maxslp) {
+ if (kg->kg_slptime >= maxslp) {
mtx_unlock_spin(&sched_lock);
continue;
}
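loadav() maintains a "tenex style" exponentially decayed average of the run-queue length; the fixed-point cexp[] table caches exp(-interval/tau) for the 1-, 5- and 15-minute time constants. A floating-point model of the recurrence the fixed-point code implements:

    #include <math.h>

    /* ldavg[i] decays toward the instantaneous nrun with time constant tau[i]. */
    static double ldavg[3];
    static const double tau[3] = { 60.0, 300.0, 900.0 };    /* seconds */

    void
    loadav_model(int nrun, double interval)  /* interval: seconds per sample */
    {
            for (int i = 0; i < 3; i++) {
                    double decay = exp(-interval / tau[i]);
                    ldavg[i] = ldavg[i] * decay + (double)nrun * (1.0 - decay);
            }
    }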
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 17fd7f2..0336529 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -112,8 +112,8 @@ vmmapentry_rsrc_init(dummy)
*/
/* ARGSUSED */
int
-sbrk(p, uap)
- struct proc *p;
+sbrk(td, uap)
+ struct thread *td;
struct sbrk_args *uap;
{
/* Not yet implemented */
@@ -133,8 +133,8 @@ struct sstk_args {
*/
/* ARGSUSED */
int
-sstk(p, uap)
- struct proc *p;
+sstk(td, uap)
+ struct thread *td;
struct sstk_args *uap;
{
/* Not yet implemented */
@@ -152,12 +152,12 @@ struct getpagesize_args {
/* ARGSUSED */
int
-ogetpagesize(p, uap)
- struct proc *p;
+ogetpagesize(td, uap)
+ struct thread *td;
struct getpagesize_args *uap;
{
/* MP SAFE */
- p->p_retval[0] = PAGE_SIZE;
+ td->td_retval[0] = PAGE_SIZE;
return (0);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */
@@ -196,11 +196,11 @@ struct mmap_args {
* MPSAFE
*/
int
-mmap(p, uap)
- struct proc *p;
+mmap(td, uap)
+ struct thread *td;
struct mmap_args *uap;
{
- struct filedesc *fdp = p->p_fd;
+ struct filedesc *fdp = td->td_proc->p_fd;
struct file *fp = NULL;
struct vnode *vp;
vm_offset_t addr;
@@ -210,7 +210,7 @@ mmap(p, uap)
int flags, error;
int disablexworkaround;
off_t pos;
- struct vmspace *vms = p->p_vmspace;
+ struct vmspace *vms = td->td_proc->p_vmspace;
vm_object_t obj;
addr = (vm_offset_t) uap->addr;
@@ -358,7 +358,7 @@ mmap(p, uap)
if (securelevel >= 1)
disablexworkaround = 1;
else
- disablexworkaround = suser(p);
+ disablexworkaround = suser_td(td);
if (vp->v_type == VCHR && disablexworkaround &&
(flags & (MAP_PRIVATE|MAP_COPY))) {
error = EINVAL;
@@ -396,7 +396,7 @@ mmap(p, uap)
struct vattr va;
if ((error =
VOP_GETATTR(vp, &va,
- p->p_ucred, p))) {
+ td->td_proc->p_ucred, td))) {
goto done;
}
if ((va.va_flags &
@@ -433,11 +433,11 @@ mmap(p, uap)
error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
flags, handle, pos);
if (error == 0)
- p->p_retval[0] = (register_t) (addr + pageoff);
+ td->td_retval[0] = (register_t) (addr + pageoff);
mtx_lock(&Giant);
done:
if (fp)
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -455,8 +455,8 @@ struct ommap_args {
};
#endif
int
-ommap(p, uap)
- struct proc *p;
+ommap(td, uap)
+ struct thread *td;
struct ommap_args *uap;
{
struct mmap_args nargs;
@@ -492,7 +492,7 @@ ommap(p, uap)
nargs.flags |= MAP_FIXED;
nargs.fd = uap->fd;
nargs.pos = uap->pos;
- return (mmap(p, &nargs));
+ return (mmap(td, &nargs));
}
#endif /* COMPAT_43 */
@@ -508,8 +508,8 @@ struct msync_args {
* MPSAFE
*/
int
-msync(p, uap)
- struct proc *p;
+msync(td, uap)
+ struct thread *td;
struct msync_args *uap;
{
vm_offset_t addr;
@@ -534,7 +534,7 @@ msync(p, uap)
mtx_lock(&Giant);
- map = &p->p_vmspace->vm_map;
+ map = &td->td_proc->p_vmspace->vm_map;
/*
* XXX Gak! If size is zero we are supposed to sync "all modified
@@ -588,8 +588,8 @@ struct munmap_args {
* MPSAFE
*/
int
-munmap(p, uap)
- struct proc *p;
+munmap(td, uap)
+ struct thread *td;
struct munmap_args *uap;
{
vm_offset_t addr;
@@ -620,7 +620,7 @@ munmap(p, uap)
return (EINVAL);
#endif
mtx_lock(&Giant);
- map = &p->p_vmspace->vm_map;
+ map = &td->td_proc->p_vmspace->vm_map;
/*
* Make sure entire range is allocated.
*/
@@ -636,14 +636,14 @@ munmap(p, uap)
#if 0
void
-munmapfd(p, fd)
- struct proc *p;
+munmapfd(td, fd)
+ struct thread *td;
int fd;
{
/*
* XXX should unmap any regions mapped to this file
*/
- p->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
+ td->td_proc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}
#endif
@@ -658,8 +658,8 @@ struct mprotect_args {
* MPSAFE
*/
int
-mprotect(p, uap)
- struct proc *p;
+mprotect(td, uap)
+ struct thread *td;
struct mprotect_args *uap;
{
vm_offset_t addr;
@@ -683,7 +683,7 @@ mprotect(p, uap)
return(EINVAL);
mtx_lock(&Giant);
- ret = vm_map_protect(&p->p_vmspace->vm_map, addr,
+ ret = vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
addr + size, prot, FALSE);
mtx_unlock(&Giant);
switch (ret) {
@@ -706,8 +706,8 @@ struct minherit_args {
* MPSAFE
*/
int
-minherit(p, uap)
- struct proc *p;
+minherit(td, uap)
+ struct thread *td;
struct minherit_args *uap;
{
vm_offset_t addr;
@@ -727,7 +727,7 @@ minherit(p, uap)
return(EINVAL);
mtx_lock(&Giant);
- ret = vm_map_inherit(&p->p_vmspace->vm_map, addr, addr+size,
+ ret = vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr, addr+size,
inherit);
mtx_unlock(&Giant);
@@ -753,8 +753,8 @@ struct madvise_args {
*/
/* ARGSUSED */
int
-madvise(p, uap)
- struct proc *p;
+madvise(td, uap)
+ struct thread *td;
struct madvise_args *uap;
{
vm_offset_t start, end;
@@ -787,7 +787,7 @@ madvise(p, uap)
end = round_page((vm_offset_t) uap->addr + uap->len);
mtx_lock(&Giant);
- ret = vm_map_madvise(&p->p_vmspace->vm_map, start, end, uap->behav);
+ ret = vm_map_madvise(&td->td_proc->p_vmspace->vm_map, start, end, uap->behav);
mtx_unlock(&Giant);
return (ret ? EINVAL : 0);
}
@@ -805,8 +805,8 @@ struct mincore_args {
*/
/* ARGSUSED */
int
-mincore(p, uap)
- struct proc *p;
+mincore(td, uap)
+ struct thread *td;
struct mincore_args *uap;
{
vm_offset_t addr, first_addr;
@@ -838,8 +838,8 @@ mincore(p, uap)
vec = uap->vec;
mtx_lock(&Giant);
- map = &p->p_vmspace->vm_map;
- pmap = vmspace_pmap(p->p_vmspace);
+ map = &td->td_proc->p_vmspace->vm_map;
+ pmap = vmspace_pmap(td->td_proc->p_vmspace);
vm_map_lock_read(map);
RestartScan:
@@ -1001,8 +1001,8 @@ struct mlock_args {
* MPSAFE
*/
int
-mlock(p, uap)
- struct proc *p;
+mlock(td, uap)
+ struct thread *td;
struct mlock_args *uap;
{
vm_offset_t addr;
@@ -1025,17 +1025,17 @@ mlock(p, uap)
return (EAGAIN);
#ifdef pmap_wired_count
- if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
- p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
+ if (size + ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))) >
+ td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
return (ENOMEM);
#else
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
#endif
mtx_lock(&Giant);
- error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr,
+ error = vm_map_user_pageable(&td->td_proc->p_vmspace->vm_map, addr,
addr + size, FALSE);
mtx_unlock(&Giant);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
@@ -1051,8 +1051,8 @@ struct mlockall_args {
* MPSAFE
*/
int
-mlockall(p, uap)
- struct proc *p;
+mlockall(td, uap)
+ struct thread *td;
struct mlockall_args *uap;
{
/* mtx_lock(&Giant); */
@@ -1070,8 +1070,8 @@ struct mlockall_args {
* MPSAFE
*/
int
-munlockall(p, uap)
- struct proc *p;
+munlockall(td, uap)
+ struct thread *td;
struct munlockall_args *uap;
{
/* mtx_lock(&Giant); */
@@ -1089,8 +1089,8 @@ struct munlock_args {
* MPSAFE
*/
int
-munlock(p, uap)
- struct proc *p;
+munlock(td, uap)
+ struct thread *td;
struct munlock_args *uap;
{
vm_offset_t addr;
@@ -1110,13 +1110,13 @@ munlock(p, uap)
return (EINVAL);
#ifndef pmap_wired_count
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
#endif
mtx_lock(&Giant);
- error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr,
+ error = vm_map_user_pageable(&td->td_proc->p_vmspace->vm_map, addr,
addr + size, TRUE);
mtx_unlock(&Giant);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
@@ -1143,7 +1143,7 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
int rv = KERN_SUCCESS;
vm_ooffset_t objsize;
int docow;
- struct proc *p = curproc;
+ struct thread *td = curthread;
if (size == 0)
return (0);
@@ -1192,7 +1192,7 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
struct vattr vat;
int error;
- error = VOP_GETATTR(vp, &vat, p->p_ucred, p);
+ error = VOP_GETATTR(vp, &vat, td->td_proc->p_ucred, td);
if (error) {
mtx_unlock(&Giant);
return (error);
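mmap()'s argument handling leans on the trunc_page()/round_page() macros to split user addresses into a page-aligned base plus offset. Self-contained equivalents, assuming a power-of-two PAGE_SIZE (it is machine-dependent in the kernel):

    #include <stdint.h>

    #define PAGE_SIZE       4096UL          /* assumed; MD in the kernel */
    #define PAGE_MASK       (PAGE_SIZE - 1)
    #define trunc_page(x)   ((uintptr_t)(x) & ~PAGE_MASK)
    #define round_page(x)   (((uintptr_t)(x) + PAGE_MASK) & ~PAGE_MASK)

    /* e.g. addr 0x1234 -> trunc 0x1000, round 0x2000, pageoff 0x234 */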
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index fe7e8c6..1d5a989 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -326,7 +326,7 @@ vm_object_reference(vm_object_t object)
object->ref_count++;
if (object->type == OBJT_VNODE) {
- while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
+ while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curthread)) {
printf("vm_object_reference: delay in getting object\n");
}
}
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 187d8ba7..be7df98 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -546,7 +546,7 @@ vm_pageout_map_deactivate_pages(map, desired)
vm_object_t obj, bigobj;
GIANT_REQUIRED;
- if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
+ if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curthread)) {
return;
}
@@ -863,7 +863,7 @@ rescan0:
mp = NULL;
if (vp->v_type == VREG)
vn_start_write(vp, &mp, V_NOWAIT);
- if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
+ if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curthread)) {
vn_finished_write(mp);
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
@@ -1162,11 +1162,14 @@ rescan0:
}
sx_sunlock(&allproc_lock);
if (bigproc != NULL) {
+ struct ksegrp *kg;
killproc(bigproc, "out of swap space");
mtx_lock_spin(&sched_lock);
- bigproc->p_estcpu = 0;
- bigproc->p_nice = PRIO_MIN;
- resetpriority(bigproc);
+ FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
+ kg->kg_estcpu = 0;
+ kg->kg_nice = PRIO_MIN; /* XXXKSE ??? */
+ resetpriority(kg);
+ }
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(bigproc);
wakeup(&cnt.v_free_count);
@@ -1358,9 +1361,9 @@ vm_pageout()
if (vm_pageout_stats_free_max == 0)
vm_pageout_stats_free_max = 5;
- PROC_LOCK(curproc);
- curproc->p_flag |= P_BUFEXHAUST;
- PROC_UNLOCK(curproc);
+ PROC_LOCK(curthread->td_proc);
+ curthread->td_proc->p_flag |= P_BUFEXHAUST;
+ PROC_UNLOCK(curthread->td_proc);
swap_pager_swap_init();
pass = 0;
/*
@@ -1421,7 +1424,7 @@ vm_pageout()
void
pagedaemon_wakeup()
{
- if (!vm_pages_needed && curproc != pageproc) {
+ if (!vm_pages_needed && curthread->td_proc != pageproc) {
vm_pages_needed++;
wakeup(&vm_pages_needed);
}
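pagedaemon_wakeup() above pokes the page daemon only when a wakeup is not already pending and the caller is not the daemon itself. A rough pthreads model of that guard, with a condition variable standing in for the kernel's wakeup()/tsleep() pair:

    #include <pthread.h>

    static pthread_mutex_t pd_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  pd_cv  = PTHREAD_COND_INITIALIZER;
    static int vm_pages_needed;
    static pthread_t pagedaemon_tid;        /* set when the daemon starts */

    void
    pagedaemon_wakeup_model(void)
    {
            pthread_mutex_lock(&pd_mtx);
            if (!vm_pages_needed &&
                !pthread_equal(pthread_self(), pagedaemon_tid)) {
                    vm_pages_needed = 1;
                    pthread_cond_signal(&pd_cv);  /* like wakeup() */
            }
            pthread_mutex_unlock(&pd_mtx);
    }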
diff --git a/sys/vm/vm_swap.c b/sys/vm/vm_swap.c
index 72f7b9c..1a26712 100644
--- a/sys/vm/vm_swap.c
+++ b/sys/vm/vm_swap.c
@@ -188,8 +188,8 @@ struct swapon_args {
*/
/* ARGSUSED */
int
-swapon(p, uap)
- struct proc *p;
+swapon(td, uap)
+ struct thread *td;
struct swapon_args *uap;
{
struct vattr attr;
@@ -198,8 +198,7 @@ swapon(p, uap)
int error;
mtx_lock(&Giant);
-
- error = suser(p);
+ error = suser_td(td);
if (error)
goto done2;
@@ -212,7 +211,7 @@ swapon(p, uap)
goto done2;
}
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td);
error = namei(&nd);
if (error)
goto done2;
@@ -221,14 +220,14 @@ swapon(p, uap)
vp = nd.ni_vp;
if (vn_isdisk(vp, &error))
- error = swaponvp(p, vp, vp->v_rdev, 0);
+ error = swaponvp(td, vp, vp->v_rdev, 0);
else if (vp->v_type == VREG && vp->v_tag == VT_NFS &&
- (error = VOP_GETATTR(vp, &attr, p->p_ucred, p)) == 0) {
+ (error = VOP_GETATTR(vp, &attr, td->td_proc->p_ucred, td)) == 0) {
/*
* Allow direct swapping to NFS regular files in the same
* way that nfs_mountroot() sets up diskless swapping.
*/
- error = swaponvp(p, vp, NODEV, attr.va_size / DEV_BSIZE);
+ error = swaponvp(td, vp, NODEV, attr.va_size / DEV_BSIZE);
}
if (error)
@@ -250,8 +249,8 @@ done2:
* XXX locking when multiple swapon's run in parallel
*/
int
-swaponvp(p, vp, dev, nblks)
- struct proc *p;
+swaponvp(td, vp, dev, nblks)
+ struct thread *td;
struct vnode *vp;
dev_t dev;
u_long nblks;
@@ -263,6 +262,7 @@ swaponvp(p, vp, dev, nblks)
swblk_t dvbase;
int error;
u_long aligned_nblks;
+ struct proc *p = td->td_proc;
if (!swapdev_vp) {
error = getnewvnode(VT_NON, NULL, swapdev_vnodeop_p,
@@ -282,19 +282,19 @@ swaponvp(p, vp, dev, nblks)
}
return EINVAL;
found:
- (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_OPEN(vp, FREAD | FWRITE, p->p_ucred, p);
- (void) VOP_UNLOCK(vp, 0, p);
+ (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_OPEN(vp, FREAD | FWRITE, p->p_ucred, td);
+ (void) VOP_UNLOCK(vp, 0, td);
if (error)
return (error);
if (nblks == 0 && dev != NODEV && (devsw(dev)->d_psize == 0 ||
(nblks = (*devsw(dev)->d_psize) (dev)) == -1)) {
- (void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, p);
+ (void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, td);
return (ENXIO);
}
if (nblks == 0) {
- (void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, p);
+ (void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, td);
return (ENXIO);
}
@@ -305,7 +305,7 @@ swaponvp(p, vp, dev, nblks)
if (nblks > 0x40000000 / BLIST_META_RADIX / nswdev) {
printf("exceeded maximum of %d blocks per swap unit\n",
0x40000000 / BLIST_META_RADIX / nswdev);
- (void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, p);
+ (void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, td);
return (ENXIO);
}
/*
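The size check above caps each swap unit at 0x40000000 / BLIST_META_RADIX / nswdev blocks so the radix-tree swap allocator's block address space, split across interleaved devices, cannot overflow. A worked example, assuming BLIST_META_RADIX is 16 and nswdev is the default 4 (both values are assumptions here):

    #include <stdio.h>

    #define BLIST_META_RADIX 16             /* assumed, per <sys/blist.h> */
    #define NSWAPDEV         4              /* assumed default nswdev */
    #define DEV_BSIZE        512

    int
    main(void)
    {
            long long maxblks = 0x40000000LL / BLIST_META_RADIX / NSWAPDEV;

            /* 16777216 blocks * 512 bytes = 8 GB per swap unit */
            printf("max %lld blocks (%lld MB) per swap unit\n",
                maxblks, maxblks * DEV_BSIZE / (1024 * 1024));
            return (0);
    }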
diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c
index 10a5f65..47a768f 100644
--- a/sys/vm/vm_unix.c
+++ b/sys/vm/vm_unix.c
@@ -70,11 +70,11 @@ struct obreak_args {
*/
/* ARGSUSED */
int
-obreak(p, uap)
- struct proc *p;
+obreak(td, uap)
+ struct thread *td;
struct obreak_args *uap;
{
- struct vmspace *vm = p->p_vmspace;
+ struct vmspace *vm = td->td_proc->p_vmspace;
vm_offset_t new, old, base;
int rv;
int error = 0;
@@ -90,7 +90,7 @@ obreak(p, uap)
* reduce their usage, even if they remain over the limit.
*/
if (new > old &&
- (new - base) > (unsigned) p->p_rlimit[RLIMIT_DATA].rlim_cur) {
+ (new - base) > (unsigned) td->td_proc->p_rlimit[RLIMIT_DATA].rlim_cur) {
error = ENOMEM;
goto done;
}
@@ -143,8 +143,8 @@ struct ovadvise_args {
*/
/* ARGSUSED */
int
-ovadvise(p, uap)
- struct proc *p;
+ovadvise(td, uap)
+ struct thread *td;
struct ovadvise_args *uap;
{
/* START_GIANT_OPTIONAL */
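obreak() only charges RLIMIT_DATA on growth; shrinking is always allowed so an over-limit process can dig itself out. A userland sketch of the same test:

    #include <sys/resource.h>               /* getrlimit(), struct rlimit */
    #include <stdint.h>
    #include <errno.h>

    int
    brk_check(uintptr_t base, uintptr_t old, uintptr_t new)
    {
            struct rlimit rl;

            if (getrlimit(RLIMIT_DATA, &rl) != 0)
                    return (errno);
            /* Growth past the soft data limit fails; shrinking never does. */
            if (new > old && new - base > rl.rlim_cur)
                    return (ENOMEM);
            return (0);
    }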
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 1fcab4d..a7e5685 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -112,14 +112,14 @@ vm_page_zero_idle_wakeup(void)
static void
vm_pagezero(void)
{
- struct proc *p = curproc;
+ struct thread *td = curthread;
struct rtprio rtp;
int pages = 0;
rtp.prio = RTP_PRIO_MAX;
rtp.type = RTP_PRIO_IDLE;
mtx_lock_spin(&sched_lock);
- rtp_to_pri(&rtp, &p->p_pri);
+ rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
mtx_unlock_spin(&sched_lock);
for (;;) {
@@ -127,8 +127,8 @@ vm_pagezero(void)
pages += vm_page_zero_idle();
if (pages > idlezero_maxrun) {
mtx_lock_spin(&sched_lock);
- setrunqueue(p);
- p->p_stats->p_ru.ru_nvcsw++;
+ setrunqueue(td);
+ td->td_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
pages = 0;
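vm_pagezero() runs at the lowest (idle) priority and still yields voluntarily after idlezero_maxrun pages, so it cannot monopolize the CPU once real work shows up. A userland model, with sched_yield() standing in for the setrunqueue()/mi_switch() pair above and an assumed value for the tunable:

    #include <sched.h>

    static int idlezero_maxrun = 64;        /* assumed tunable value */
    int zero_one_page(void);                /* stand-in for vm_page_zero_idle() */

    void
    pagezero_model(void)
    {
            int pages = 0;

            for (;;) {
                    pages += zero_one_page();
                    if (pages > idlezero_maxrun) {
                            sched_yield();  /* like setrunqueue()+mi_switch() */
                            pages = 0;
                    }
            }
    }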
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 7edaebc..bad9bcf 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -434,7 +434,7 @@ vnode_pager_input_smlfs(object, m)
/* build a minimal buffer header */
bp->b_iocmd = BIO_READ;
bp->b_iodone = vnode_pager_iodone;
- bp->b_rcred = bp->b_wcred = curproc->p_ucred;
+ bp->b_rcred = bp->b_wcred = curthread->td_proc->p_ucred;
if (bp->b_rcred != NOCRED)
crhold(bp->b_rcred);
if (bp->b_wcred != NOCRED)
@@ -527,9 +527,9 @@ vnode_pager_input_old(object, m)
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_rw = UIO_READ;
auio.uio_resid = size;
- auio.uio_procp = curproc;
+ auio.uio_td = curthread;
- error = VOP_READ(vp, &auio, 0, curproc->p_ucred);
+ error = VOP_READ(vp, &auio, 0, curthread->td_proc->p_ucred);
if (!error) {
int count = size - auio.uio_resid;
@@ -754,7 +754,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
bp->b_iocmd = BIO_READ;
bp->b_iodone = vnode_pager_iodone;
/* B_PHYS is not set, but it is nice to fill this in */
- bp->b_rcred = bp->b_wcred = curproc->p_ucred;
+ bp->b_rcred = bp->b_wcred = curthread->td_proc->p_ucred;
if (bp->b_rcred != NOCRED)
crhold(bp->b_rcred);
if (bp->b_wcred != NOCRED)
@@ -983,8 +983,8 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
auio.uio_segflg = UIO_NOCOPY;
auio.uio_rw = UIO_WRITE;
auio.uio_resid = maxsize;
- auio.uio_procp = (struct proc *) 0;
- error = VOP_WRITE(vp, &auio, ioflags, curproc->p_ucred);
+ auio.uio_td = (struct thread *) 0;
+ error = VOP_WRITE(vp, &auio, ioflags, curthread->td_proc->p_ucred);
cnt.v_vnodeout++;
cnt.v_vnodepgsout += ncount;
@@ -1005,7 +1005,7 @@ struct vnode *
vnode_pager_lock(object)
vm_object_t object;
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
GIANT_REQUIRED;
@@ -1018,7 +1018,7 @@ vnode_pager_lock(object)
/* XXX; If object->handle can change, we need to cache it. */
while (vget(object->handle,
- LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, p)) {
+ LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, td)){
if ((object->flags & OBJ_DEAD) || (object->type != OBJT_VNODE))
return NULL;
printf("vnode_pager_lock: retrying\n");