diff options
author | das <das@FreeBSD.org> | 2004-11-20 02:28:48 +0000 |
---|---|---|
committer | das <das@FreeBSD.org> | 2004-11-20 02:28:48 +0000 |
commit | 22907ad4acaa6f480676f7f29ddf72a20cb9f482 (patch) | |
tree | 6ad522af93fdc47bddb1b00e61feb2ada3eb6e90 /sys | |
parent | bab4eb89e3e1c47f0551fa9f8ae6e8cb3c68bffc (diff) | |
download | FreeBSD-src-22907ad4acaa6f480676f7f29ddf72a20cb9f482.zip FreeBSD-src-22907ad4acaa6f480676f7f29ddf72a20cb9f482.tar.gz |
Malloc p_stats instead of putting it in the U area. We should consider
simply embedding it in struct proc.
Reviewed by: arch@
Diffstat (limited to 'sys')
-rw-r--r-- | sys/kern/init_main.c | 8 | ||||
-rw-r--r-- | sys/kern/kern_fork.c | 4 | ||||
-rw-r--r-- | sys/kern/kern_proc.c | 49 |
3 files changed, 49 insertions(+), 12 deletions(-)
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index 4b94b5b..ce2276b 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -422,6 +422,8 @@ proc0_init(void *dummy __unused) p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3; p->p_cpulimit = RLIM_INFINITY; + p->p_stats = pstats_alloc(); + /* Allocate a prototype map so we have something to fork. */ pmap_pinit0(vmspace_pmap(&vmspace0)); p->p_vmspace = &vmspace0; @@ -431,12 +433,6 @@ proc0_init(void *dummy __unused) vmspace0.vm_map.pmap = vmspace_pmap(&vmspace0); /* - * We continue to place resource usage info - * in the user struct so that it's pageable. - */ - p->p_stats = &p->p_uarea->u_stats; - - /* * Charge root for one process. */ (void)chgproccnt(p->p_ucred->cr_ruidinfo, 1, 0); diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index 9d90b1c..2e9830a 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -489,7 +489,6 @@ again: /* * Duplicate sub-structures as needed. * Increase reference counts on shared objects. - * The p_stats substruct is set in vm_forkproc. */ p2->p_flag = 0; if (p1->p_flag & P_PROFIL) @@ -527,6 +526,9 @@ again: * p_limit is copy-on-write. Bump its refcount. 
*/ p2->p_limit = lim_hold(p1->p_limit); + + pstats_fork(p1->p_stats, p2->p_stats); + PROC_UNLOCK(p1); PROC_UNLOCK(p2); diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index 7586b44..63393b3 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -96,9 +96,7 @@ uma_zone_t proc_zone; uma_zone_t ithread_zone; int kstack_pages = KSTACK_PAGES; -int uarea_pages = UAREA_PAGES; SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, ""); -SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTLFLAG_RD, &uarea_pages, 0, ""); CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); @@ -180,11 +178,11 @@ proc_init(void *mem, int size, int flags) p = (struct proc *)mem; p->p_sched = (struct p_sched *)&p[1]; - vm_proc_new(p); td = thread_alloc(); kg = ksegrp_alloc(); bzero(&p->p_mtx, sizeof(struct mtx)); mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); + p->p_stats = pstats_alloc(); proc_linkup(p, kg, td); sched_newproc(p, kg, td); return (0); @@ -660,8 +658,6 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp) kp->ki_size = vm->vm_map.size; kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/ - if (p->p_sflag & PS_INMEM) - kp->ki_rssize += UAREA_PAGES; FOREACH_THREAD_IN_PROC(p, td0) { if (!TD_IS_SWAPPED(td0)) kp->ki_rssize += td0->td_kstack_pages; @@ -805,6 +801,49 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp) } /* + * Fill a 'struct user' for backwards compatibility with a.out core dumps. + * This is used by the aout, linux, and pecoff modules. + */ +void +fill_user(struct proc *p, struct user *u) +{ + + PROC_LOCK_ASSERT(p, MA_OWNED); + bcopy(&p->p_stats, &u->u_stats, sizeof(struct pstats)); + fill_kinfo_proc(p, &u->u_kproc); +} + +struct pstats * +pstats_alloc(void) +{ + + return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK)); +} + +/* + * Copy parts of p_stats; zero the rest of p_stats (statistics). 
+ */ +void +pstats_fork(struct pstats *src, struct pstats *dst) +{ + +#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) + + bzero(&dst->pstat_startzero, + (unsigned)RANGEOF(struct pstats, pstat_startzero, pstat_endzero)); + bcopy(&src->pstat_startcopy, &dst->pstat_startcopy, + (unsigned)RANGEOF(struct pstats, pstat_startcopy, pstat_endcopy)); +#undef RANGEOF +} + +void +pstats_free(struct pstats *ps) +{ + + free(ps, M_SUBPROC); +} + +/* * Locate a zombie process by number */ struct proc * |