summaryrefslogtreecommitdiffstats
path: root/sys/powerpc
diff options
context:
space:
mode:
authordillon <dillon@FreeBSD.org>2001-07-04 16:20:28 +0000
committerdillon <dillon@FreeBSD.org>2001-07-04 16:20:28 +0000
commite028603b7e3e4fb35cdf00aab533f3965f4a13cc (patch)
tree7420cce169451a74c5b87963467a4aeff668ed12 /sys/powerpc
parent0b028660051eb7abf4306d34e7fec0e7fde86a28 (diff)
downloadFreeBSD-src-e028603b7e3e4fb35cdf00aab533f3965f4a13cc.zip
FreeBSD-src-e028603b7e3e4fb35cdf00aab533f3965f4a13cc.tar.gz
With Alfred's permission, remove vm_mtx in favor of a fine-grained approach
(this commit is just the first stage). Also add various GIANT_ macros to formalize the removal of Giant, making it easy to test in a more piecemeal fashion. These macros will allow us to test fine-grained locks to a degree before removing Giant, and also after, and to remove Giant in a piecemeal fashion via sysctls on those subsystems which the authors believe can operate without Giant.
Diffstat (limited to 'sys/powerpc')
-rw-r--r--sys/powerpc/aim/vm_machdep.c21
-rw-r--r--sys/powerpc/powerpc/vm_machdep.c21
2 files changed, 18 insertions, 24 deletions
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index c281ef1..4651cef 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -185,14 +185,13 @@ void
cpu_wait(p)
struct proc *p;
{
+ GIANT_REQUIRED;
- mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
- mtx_unlock(&vm_mtx);
}
/*
@@ -241,10 +240,11 @@ vmapbuf(bp)
register caddr_t addr, v, kva;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -260,7 +260,6 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
- mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -278,10 +277,11 @@ vunmapbuf(bp)
register caddr_t addr;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -289,7 +289,6 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
- mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -347,17 +346,13 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (mtx_trylock(&vm_mtx) == 0)
- return (0);
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
-
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -386,8 +381,10 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_unlock(&vm_mtx);
+ mtx_unlock(&Giant);
return (1);
+ }
+ return(0);
}
/*
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index c281ef1..4651cef 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -185,14 +185,13 @@ void
cpu_wait(p)
struct proc *p;
{
+ GIANT_REQUIRED;
- mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
- mtx_unlock(&vm_mtx);
}
/*
@@ -241,10 +240,11 @@ vmapbuf(bp)
register caddr_t addr, v, kva;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -260,7 +260,6 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
- mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -278,10 +277,11 @@ vunmapbuf(bp)
register caddr_t addr;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -289,7 +289,6 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
- mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -347,17 +346,13 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (mtx_trylock(&vm_mtx) == 0)
- return (0);
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
-
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -386,8 +381,10 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_unlock(&vm_mtx);
+ mtx_unlock(&Giant);
return (1);
+ }
+ return(0);
}
/*
OpenPOWER on IntegriCloud