From 279eafd09f2514d288cf449bcb7edca19ea31d50 Mon Sep 17 00:00:00 2001
From: alc
Date: Wed, 21 Jul 1999 18:02:27 +0000
Subject: Fix the following problem: When creating new processes (or
 performing exec), the new page directory is initialized too early.  The
 kernel might grow before p_vmspace is initialized for the new process.
 Since pmap_growkernel doesn't yet know about the new page directory, it
 isn't updated, and subsequent use causes a failure.

The fix is (1) to clear p_vmspace early, to stop pmap_growkernel from
stomping on memory, and (2) to defer part of the initialization of new
page directories until p_vmspace is initialized.

PR:		kern/12378
Submitted by:	tegge
Reviewed by:	dfr
---
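To make the race easier to see, the sketch below paraphrases the per-process
walk that pmap_growkernel() performs (on i386) when a new kernel page table
page is added.  It is not code from the tree and not part of the diff that
follows; update_all_pmaps() is a made-up name for what is really inline code,
and the comments tie the loop to the two halves of the fix.  A second sketch
after the patch shows the calling convention that pmap_pinit2() introduces.

	/*
	 * Paraphrased sketch of pmap_growkernel()'s walk over every
	 * process; "newpdir" is the page directory entry for the newly
	 * added kernel page table page.
	 */
	static void
	update_all_pmaps(newpdir)
		pd_entry_t newpdir;
	{
		struct proc *p;

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * Half (1) of the fix: fork1() now clears p_vmspace
			 * before the embryonic process becomes visible on
			 * allproc, so the walk skips it instead of chasing a
			 * stale pointer and stomping on memory.
			 */
			if (p->p_vmspace == NULL)
				continue;
			/*
			 * Half (2): a pmap that copied the kernel entries in
			 * pmap_pinit() but was not yet reachable through
			 * p_vmspace would miss this update; pmap_pinit2()
			 * defers the copy until after the vmspace is
			 * attached, closing that window.
			 */
			*pmap_pde(vmspace_pmap(p->p_vmspace), kernel_vm_end) = newpdir;
		}
	}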
 sys/alpha/alpha/pmap.c | 18 ++++++++++++++----
 sys/amd64/amd64/pmap.c | 19 +++++++++++++++----
 sys/i386/i386/pmap.c   | 19 +++++++++++++++----
 sys/kern/kern_fork.c   |  4 +++-
 sys/vm/pmap.h          |  3 ++-
 sys/vm/vm_glue.c       |  4 +++-
 sys/vm/vm_map.c        |  4 +++-
 7 files changed, 55 insertions(+), 16 deletions(-)

(limited to 'sys')

diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 75eb9d0..b8a3e7d 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -43,7 +43,7 @@
  * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
  * from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
  * with some ideas from NetBSD's alpha pmap
- * $Id: pmap.c,v 1.26 1999/06/10 20:40:55 dt Exp $
+ * $Id: pmap.c,v 1.27 1999/06/28 09:38:09 peter Exp $
  */
 
 /*
@@ -1272,9 +1272,6 @@ pmap_pinit(pmap)
 	if ((lev1pg->flags & PG_ZERO) == 0)
 		bzero(pmap->pm_lev1, PAGE_SIZE);
 
-	/* wire in kernel global address entries */
-	/* XXX copies current process, does not fill in MPPTDI */
-	bcopy(PTlev1 + K1SEGLEV1I, pmap->pm_lev1 + K1SEGLEV1I, nklev2 * PTESIZE);
 
 	/* install self-referential address mapping entry (not PG_ASM) */
 	pmap->pm_lev1[PTLEV1I] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(lev1pg))
@@ -1290,6 +1287,19 @@ pmap_pinit(pmap)
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 }
 
+/*
+ * Wire in kernel global address entries.  To avoid a race condition
+ * between pmap initialization and pmap_growkernel, this procedure
+ * should be called after the vmspace is attached to the process
+ * but before this pmap is activated.
+ */
+void
+pmap_pinit2(pmap)
+	struct pmap *pmap;
+{
+	bcopy(PTlev1 + K1SEGLEV1I, pmap->pm_lev1 + K1SEGLEV1I, nklev2 * PTESIZE);
+}
+
 static int
 pmap_release_free_page(pmap_t pmap, vm_page_t p)
 {
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 2b5c4fb..28bdc7e 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
- *	$Id: pmap.c,v 1.240 1999/06/23 21:47:21 luoqi Exp $
+ *	$Id: pmap.c,v 1.241 1999/07/08 06:05:49 mckusick Exp $
  */
 
 /*
@@ -1172,9 +1172,6 @@ pmap_pinit(pmap)
 	if ((ptdpg->flags & PG_ZERO) == 0)
 		bzero(pmap->pm_pdir, PAGE_SIZE);
 
-	/* wire in kernel global address entries */
-	/* XXX copies current process, does not fill in MPPTDI */
-	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
 #ifdef SMP
 	pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
 #endif
@@ -1190,6 +1187,20 @@ pmap_pinit(pmap)
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 }
 
+/*
+ * Wire in kernel global address entries.  To avoid a race condition
+ * between pmap initialization and pmap_growkernel, this procedure
+ * should be called after the vmspace is attached to the process
+ * but before this pmap is activated.
+ */
+void
+pmap_pinit2(pmap)
+	struct pmap *pmap;
+{
+	/* XXX copies current process, does not fill in MPPTDI */
+	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
+}
+
 static int
 pmap_release_free_page(pmap, p)
 	struct pmap *pmap;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 2b5c4fb..28bdc7e 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
- *	$Id: pmap.c,v 1.240 1999/06/23 21:47:21 luoqi Exp $
+ *	$Id: pmap.c,v 1.241 1999/07/08 06:05:49 mckusick Exp $
  */
 
 /*
@@ -1172,9 +1172,6 @@ pmap_pinit(pmap)
 	if ((ptdpg->flags & PG_ZERO) == 0)
 		bzero(pmap->pm_pdir, PAGE_SIZE);
 
-	/* wire in kernel global address entries */
-	/* XXX copies current process, does not fill in MPPTDI */
-	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
 #ifdef SMP
 	pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
 #endif
@@ -1190,6 +1187,20 @@ pmap_pinit(pmap)
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 }
 
+/*
+ * Wire in kernel global address entries.  To avoid a race condition
+ * between pmap initialization and pmap_growkernel, this procedure
+ * should be called after the vmspace is attached to the process
+ * but before this pmap is activated.
+ */
+void
+pmap_pinit2(pmap)
+	struct pmap *pmap;
+{
+	/* XXX copies current process, does not fill in MPPTDI */
+	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
+}
+
 static int
 pmap_release_free_page(pmap, p)
 	struct pmap *pmap;
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 9c6a66a..633e7e7 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
- * $Id: kern_fork.c,v 1.62 1999/06/30 15:33:34 peter Exp $
+ * $Id: kern_fork.c,v 1.63 1999/07/03 20:58:44 peter Exp $
  */
 
 #include "opt_ktrace.h"
@@ -249,6 +249,8 @@ fork1(p1, flags, procp)
 
 	newproc->p_wakeup = 0;
 
+	newproc->p_vmspace = NULL;
+
 	/*
 	 * Find an unused process ID.  We remember a range of unused IDs
 	 * ready to use (from nextpid+1 through pidchecked-1).
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 0e7d1ae..32d1846 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: pmap.h,v 1.29 1999/04/05 19:38:29 julian Exp $
+ * $Id: pmap.h,v 1.30 1999/04/23 20:29:57 dt Exp $
  */
 
 /*
@@ -121,6 +121,7 @@ void		 pmap_pageable __P((pmap_t, vm_offset_t, vm_offset_t,
 vm_offset_t	 pmap_phys_address __P((int));
 void		 pmap_pinit __P((pmap_t));
 void		 pmap_pinit0 __P((pmap_t));
+void		 pmap_pinit2 __P((pmap_t));
 void		 pmap_protect __P((pmap_t, vm_offset_t, vm_offset_t,
 		    vm_prot_t));
 void		 pmap_qenter __P((vm_offset_t, vm_page_t *, int));
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 5221e18..2577686 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -59,7 +59,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_glue.c,v 1.85 1999/04/06 03:11:34 peter Exp $
+ * $Id: vm_glue.c,v 1.86 1999/06/19 18:42:49 alc Exp $
  */
 
 #include "opt_rlimit.h"
@@ -226,6 +226,8 @@ vm_fork(p1, p2, flags)
 
 	if ((flags & RFMEM) == 0) {
 		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
+		pmap_pinit2(vmspace_pmap(p2->p_vmspace));
+
 		if (p1->p_vmspace->vm_shm)
 			shmfork(p1, p2);
 	}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index cbe2a2d..bfebc9c 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_map.c,v 1.171 1999/07/01 19:53:41 peter Exp $
+ * $Id: vm_map.c,v 1.172 1999/07/11 18:30:31 alc Exp $
  */
 
 /*
@@ -2351,6 +2351,7 @@ vmspace_exec(struct proc *p) {
 	 */
 	vmspace_free(oldvmspace);
 	p->p_vmspace = newvmspace;
+	pmap_pinit2(vmspace_pmap(newvmspace));
 	if (p == curproc)
 		pmap_activate(p);
 }
@@ -2370,6 +2371,7 @@ vmspace_unshare(struct proc *p) {
 	newvmspace = vmspace_fork(oldvmspace);
 	vmspace_free(oldvmspace);
 	p->p_vmspace = newvmspace;
+	pmap_pinit2(vmspace_pmap(newvmspace));
 	if (p == curproc)
 		pmap_activate(p);
 }
--
cgit v1.1
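A minimal sketch of the initialization order the patch establishes on the
fork path, matching the comment above pmap_pinit2().  setup_new_vmspace()
and alloc_new_vmspace() are placeholder names used only for illustration;
pmap_pinit(), pmap_pinit2(), vmspace_pmap() and pmap_activate() are the
real interfaces.  The exec paths (vmspace_exec() and vmspace_unshare())
differ only in that they swap the new vmspace in over an old, still-valid
one instead of starting from NULL.

	#include <sys/param.h>
	#include <sys/proc.h>
	#include <vm/vm.h>
	#include <vm/pmap.h>
	#include <vm/vm_map.h>

	struct vmspace	*alloc_new_vmspace __P((void));	/* placeholder */

	/*
	 * Sketch only: the order in which a new address space is wired up
	 * after this change.  alloc_new_vmspace() stands in for whatever
	 * actually creates the vmspace (e.g. vmspace_fork()), which calls
	 * pmap_pinit() internally.
	 */
	static void
	setup_new_vmspace(p)
		struct proc *p;
	{
		struct vmspace *newvm;

		p->p_vmspace = NULL;		/* keep pmap_growkernel() off it */
		newvm = alloc_new_vmspace();	/* pmap_pinit() runs in here */
		p->p_vmspace = newvm;		/* now visible to pmap_growkernel() */
		pmap_pinit2(vmspace_pmap(newvm));	/* wire in kernel entries last */
		if (p == curproc)
			pmap_activate(p);
	}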