path: root/sys/powerpc
author		gallatin <gallatin@FreeBSD.org>	2001-05-21 16:04:24 +0000
committer	gallatin <gallatin@FreeBSD.org>	2001-05-21 16:04:24 +0000
commit		1c57c3b027ac5130744e1c6856910c6af48734c7 (patch)
tree		2025a749bc062d77e9587e449fbe767ce5b11422 /sys/powerpc
parent		b719eb0861e7eb54491a4f5c53af093f85416a70 (diff)
catch these files up to their i386 neighbors to make alpha boot
prior to the vm_mtx
Diffstat (limited to 'sys/powerpc')
-rw-r--r--	sys/powerpc/aim/vm_machdep.c	22
-rw-r--r--	sys/powerpc/powerpc/vm_machdep.c	22
2 files changed, 32 insertions, 12 deletions
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index edc2638..28184ad3 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -275,11 +275,14 @@ void
cpu_wait(p)
struct proc *p;
{
+
+ mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
+ mtx_unlock(&vm_mtx);
}
/*
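Applied, this first hunk leaves cpu_wait() with the per-process VM teardown bracketed by the global VM mutex. A reconstruction of the resulting function, pieced together from the context and added lines above (a sketch of the patched code, not a verbatim copy of the file):

void
cpu_wait(p)
	struct proc *p;
{

	mtx_lock(&vm_mtx);		/* added by this change */
	/* drop per-process resources */
	pmap_dispose_proc(p);
	/* and clean-out the vmspace */
	vmspace_free(p->p_vmspace);
	mtx_unlock(&vm_mtx);		/* added by this change */
}

Both pmap_dispose_proc() and vmspace_free() manipulate VM state, so the whole body is taken under vm_mtx in one go rather than locking around each call separately.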
@@ -331,6 +334,7 @@ vmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
+ mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -346,6 +350,7 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
+ mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -366,6 +371,7 @@ vunmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
+ mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -373,6 +379,7 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
+ mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
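vmapbuf() and vunmapbuf() get the same treatment: the loop that walks the buffer a page at a time, holding or unholding pages and entering or removing kernel mappings, is now wrapped in a single vm_mtx acquire/release. Condensed from the vunmapbuf() hunks above; lines the hunks elide, such as the computation of pa, are left as comments rather than guessed at:

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	mtx_lock(&vm_mtx);
	for (addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		/* ... pa is derived from addr in context elided by the hunk ... */
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}
	mtx_unlock(&vm_mtx);

	bp->b_data = bp->b_saveaddr;

The vmapbuf() side is symmetric: mtx_lock(&vm_mtx) before its page loop, which calls vm_page_hold() and pmap_kenter(), and mtx_unlock(&vm_mtx) right after it, before the b_saveaddr/b_data swap.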
@@ -430,12 +437,17 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
+ if (mtx_trylock(&vm_mtx) == 0)
+ return (0);
+ if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
+ mtx_unlock(&vm_mtx);
return(0);
- if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+ }
+ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
+ mtx_unlock(&vm_mtx);
return(0);
+ }
- if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -464,10 +476,8 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_unlock(&Giant);
+ mtx_unlock(&vm_mtx);
return (1);
- }
- return (0);
}
/*
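The vm_page_zero_idle() hunks are the one place where the locking changes shape rather than just being added. Previously the function opportunistically took Giant with mtx_trylock() and fell through to return 0 when it could not; now it tries vm_mtx instead, and it does so before the two free-page threshold checks, so each early return has to drop the lock again. The control flow after the patch, condensed from the hunks above (the unchanged page-zeroing body is elided):

	if (mtx_trylock(&vm_mtx) == 0)
		return (0);		/* lock busy: skip this idle pass */
	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
		mtx_unlock(&vm_mtx);
		return (0);
	}
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
		mtx_unlock(&vm_mtx);
		return (0);
	}

	s = splvm();
	m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
	zero_state = 0;
	/* ... zero the page; unchanged lines elided by the hunks ... */
	free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
	splx(s);
	mtx_unlock(&vm_mtx);
	return (1);

mtx_trylock() returns 0 when the lock cannot be acquired, so the idle-zero pass simply gives up instead of blocking; that preserves the behaviour of the old Giant-based code, only against the new VM mutex.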
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index edc2638..28184ad3 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -275,11 +275,14 @@ void
cpu_wait(p)
struct proc *p;
{
+
+ mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
+ mtx_unlock(&vm_mtx);
}
/*
@@ -331,6 +334,7 @@ vmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
+ mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -346,6 +350,7 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
+ mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -366,6 +371,7 @@ vunmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
+ mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -373,6 +379,7 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
+ mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -430,12 +437,17 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
+ if (mtx_trylock(&vm_mtx) == 0)
+ return (0);
+ if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
+ mtx_unlock(&vm_mtx);
return(0);
- if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+ }
+ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
+ mtx_unlock(&vm_mtx);
return(0);
+ }
- if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -464,10 +476,8 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_unlock(&Giant);
+ mtx_unlock(&vm_mtx);
return (1);
- }
- return (0);
}
/*