author | jake <jake@FreeBSD.org> | 2002-03-17 00:56:41 +0000
---|---|---
committer | jake <jake@FreeBSD.org> | 2002-03-17 00:56:41 +0000
commit | 34dcf8975d103d034d7e8b6788c0645cc93af254 (patch) |
tree | 8cbe6dabf99b708d371412e48b9c89cb8897e02b |
parent | 80729196189279faca005d50e34c4dfc09122e3c (diff) |
Convert all pmap_kenter/pmap_kremove pairs in MI code to use
pmap_qenter/pmap_qremove. pmap_kenter is not safe to use in MI code
because it is not guaranteed to flush the mapping from the TLB on all
CPUs. If the process in question is preempted and migrates CPUs between
the calls to pmap_kenter and pmap_kremove, the original CPU will be left
with stale mappings in its TLB. This is currently not a problem for i386
because we do not use PG_G on SMP, so all mappings, not just user
mappings, are flushed from the TLB on every context switch. This is not
the case on all architectures, and it will become a problem on i386 if
PG_G is ever used with SMP. This change was committed earlier by peter
as part of his fine-grained TLB shootdown work for i386, which was
backed out for other reasons.
Reviewed by: peter
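
To make the pattern concrete, here is a minimal sketch (not part of the commit) of the conversion applied in the diff below. The helper names, and the assumption that the caller already owns a reserved kernel virtual address `kva` and a wired page `m`, are illustrative only; the pmap interfaces themselves are the ones being swapped.

```c
/* Illustrative sketch only; kva/m and the helper names are placeholders. */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

static void
map_one_page_old(vm_offset_t kva, vm_page_t m)
{
	/*
	 * MD interface: installs the PTE, but per the commit message it
	 * may leave stale TLB entries on other CPUs if the thread migrates.
	 */
	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
	/* ... use the mapping at kva ... */
	pmap_kremove(kva);
}

static void
map_one_page_new(vm_offset_t kva, vm_page_t m)
{
	/*
	 * MI-safe interface: takes a page array and a count, and is
	 * expected to invalidate the mapping on all CPUs on removal.
	 */
	pmap_qenter(kva, &m, 1);
	/* ... use the mapping at kva ... */
	pmap_qremove(kva, 1);
}
```

The `&m, 1` form is why every single-page call site in the diff gains an extra count argument.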
-rw-r--r-- | sys/kern/kern_exec.c | 4 |
-rw-r--r-- | sys/kern/sys_process.c | 4 |
-rw-r--r-- | sys/kern/vfs_bio.c | 4 |
-rw-r--r-- | sys/vm/vm_pager.c | 5 |
-rw-r--r-- | sys/vm/vm_zone.c | 2 |
5 files changed, 10 insertions, 9 deletions
```diff
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 72ae18e..0899960 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -551,7 +551,7 @@ exec_map_first_page(imgp)
 	vm_page_wire(ma[0]);
 	vm_page_wakeup(ma[0]);
 
-	pmap_kenter((vm_offset_t) imgp->image_header, VM_PAGE_TO_PHYS(ma[0]));
+	pmap_qenter((vm_offset_t)imgp->image_header, ma, 1);
 	imgp->firstpage = ma[0];
 
 	return 0;
@@ -564,7 +564,7 @@ exec_unmap_first_page(imgp)
 	GIANT_REQUIRED;
 
 	if (imgp->firstpage) {
-		pmap_kremove((vm_offset_t) imgp->image_header);
+		pmap_qremove((vm_offset_t)imgp->image_header, 1);
 		vm_page_unwire(imgp->firstpage, 1);
 		imgp->firstpage = NULL;
 	}
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 39ff515..a6780d4 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -278,14 +278,14 @@ proc_rwmem(struct proc *p, struct uio *uio)
 		vm_object_reference(object);
 		vm_map_lookup_done(tmap, out_entry);
 
-		pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
+		pmap_qenter(kva, &m, 1);
 
 		/*
 		 * Now do the i/o move.
 		 */
 		error = uiomove((caddr_t)(kva + page_offset), len, uio);
 
-		pmap_kremove(kva);
+		pmap_qremove(kva, 1);
 
 		/*
 		 * release the page and the object
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 87b93ba..708cd34 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3324,7 +3324,7 @@ tryagain:
 		vm_page_wire(p);
 		p->valid = VM_PAGE_BITS_ALL;
 		vm_page_flag_clear(p, PG_ZERO);
-		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
+		pmap_qenter(pg, &p, 1);
 		bp->b_pages[index] = p;
 		vm_page_wakeup(p);
 	}
@@ -3353,7 +3353,7 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 				    bp->b_blkno, bp->b_lblkno);
 			}
 			bp->b_pages[index] = NULL;
-			pmap_kremove(pg);
+			pmap_qremove(pg, 1);
 			vm_page_busy(p);
 			vm_page_unwire(p, 0);
 			vm_page_free(p);
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index ae71996..60f7e72 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -318,7 +318,7 @@ vm_pager_map_page(m)
 	vm_offset_t kva;
 
 	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
-	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
+	pmap_qenter(kva, &m, 1);
 	return (kva);
 }
 
@@ -326,7 +326,8 @@ void
 vm_pager_unmap_page(kva)
 	vm_offset_t kva;
 {
-	pmap_kremove(kva);
+
+	pmap_qremove(kva, 1);
 	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
 }
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index 769d411..2d967c9 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -386,7 +386,7 @@ _zget(vm_zone_t z)
 				break;
 
 			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
-			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
+			pmap_qenter(zkva, &m, 1);
 			bzero((caddr_t) zkva, PAGE_SIZE);
 			z->zpagecount++;
 			atomic_add_int(&zone_kmem_pages, 1);
```
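
Because pmap_qenter()/pmap_qremove() take a page array and a count, the same interface also covers multi-page runs; the single-page call sites above simply pass a one-element array and a count of 1. A usage sketch follows (hypothetical helper, not from this commit; it assumes the caller supplies a reserved kernel virtual address and an array of wired pages):

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper: map a run of wired pages at kva, zero them
 * through the temporary mapping, then tear the mappings down again.
 */
static void
zero_page_run(vm_offset_t kva, vm_page_t *pages, int npages)
{
	pmap_qenter(kva, pages, npages);	/* enter npages PTEs starting at kva */
	bzero((void *)kva, npages * PAGE_SIZE);
	pmap_qremove(kva, npages);		/* remove the PTEs and invalidate */
}
```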