summary | refs | log | tree | commit | diff | stats
path: root/sys/powerpc
diff options
context:
space:
mode:
author: alc <alc@FreeBSD.org> 2006-07-10 07:03:37 +0000
committer: alc <alc@FreeBSD.org> 2006-07-10 07:03:37 +0000
commit3150e69985bbc84a982a1dd15be1e425f8ebdea5 (patch)
tree07d914886b368cac767af4474a8cf72fc59255d5 /sys/powerpc
parent1d0f81515f8698abc11a886e6f8f77ef5c52898b (diff)
download: FreeBSD-src-3150e69985bbc84a982a1dd15be1e425f8ebdea5.zip
download: FreeBSD-src-3150e69985bbc84a982a1dd15be1e425f8ebdea5.tar.gz
Add synchronization to moea_zero_page() and moea_zero_page_area().
Remove the acquisition and release of Giant from moea_zero_page_idle(). Tested by: grehan@
Diffstat (limited to 'sys/powerpc')
-rw-r--r--  sys/powerpc/aim/mmu_oea.c      25
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c  25
2 files changed, 34 insertions(+), 16 deletions(-)
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index df06c4a..9729fa3 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -264,6 +264,7 @@ SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
&moea_pte_spills, 0, "");
struct pvo_entry *moea_pvo_zeropage;
+struct mtx moea_pvo_zeropage_mtx;
vm_offset_t moea_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int moea_rkva_count = 4;
@@ -991,8 +992,12 @@ moea_zero_page(mmu_t mmu, vm_page_t m)
if (pa < SEGMENT_LENGTH) {
va = (caddr_t) pa;
} else if (moea_initialized) {
- if (moea_pvo_zeropage == NULL)
+ if (moea_pvo_zeropage == NULL) {
moea_pvo_zeropage = moea_rkva_alloc(mmu);
+ mtx_init(&moea_pvo_zeropage_mtx, "pvo zero page",
+ NULL, MTX_DEF);
+ }
+ mtx_lock(&moea_pvo_zeropage_mtx);
moea_pa_map(moea_pvo_zeropage, pa, NULL, NULL);
va = (caddr_t)PVO_VADDR(moea_pvo_zeropage);
} else {
@@ -1001,8 +1006,10 @@ moea_zero_page(mmu_t mmu, vm_page_t m)
bzero(va, PAGE_SIZE);
- if (pa >= SEGMENT_LENGTH)
+ if (pa >= SEGMENT_LENGTH) {
moea_pa_unmap(moea_pvo_zeropage, NULL, NULL);
+ mtx_unlock(&moea_pvo_zeropage_mtx);
+ }
}
void
@@ -1014,8 +1021,12 @@ moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
if (pa < SEGMENT_LENGTH) {
va = (caddr_t) pa;
} else if (moea_initialized) {
- if (moea_pvo_zeropage == NULL)
+ if (moea_pvo_zeropage == NULL) {
moea_pvo_zeropage = moea_rkva_alloc(mmu);
+ mtx_init(&moea_pvo_zeropage_mtx, "pvo zero page",
+ NULL, MTX_DEF);
+ }
+ mtx_lock(&moea_pvo_zeropage_mtx);
moea_pa_map(moea_pvo_zeropage, pa, NULL, NULL);
va = (caddr_t)PVO_VADDR(moea_pvo_zeropage);
} else {
@@ -1024,19 +1035,17 @@ moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
bzero(va + off, size);
- if (pa >= SEGMENT_LENGTH)
+ if (pa >= SEGMENT_LENGTH) {
moea_pa_unmap(moea_pvo_zeropage, NULL, NULL);
+ mtx_unlock(&moea_pvo_zeropage_mtx);
+ }
}
void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{
- /* XXX this is called outside of Giant, is moea_zero_page safe? */
- /* XXX maybe have a dedicated mapping for this to avoid the problem? */
- mtx_lock(&Giant);
moea_zero_page(mmu, m);
- mtx_unlock(&Giant);
}
/*
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index df06c4a..9729fa3 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -264,6 +264,7 @@ SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
&moea_pte_spills, 0, "");
struct pvo_entry *moea_pvo_zeropage;
+struct mtx moea_pvo_zeropage_mtx;
vm_offset_t moea_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int moea_rkva_count = 4;
@@ -991,8 +992,12 @@ moea_zero_page(mmu_t mmu, vm_page_t m)
if (pa < SEGMENT_LENGTH) {
va = (caddr_t) pa;
} else if (moea_initialized) {
- if (moea_pvo_zeropage == NULL)
+ if (moea_pvo_zeropage == NULL) {
moea_pvo_zeropage = moea_rkva_alloc(mmu);
+ mtx_init(&moea_pvo_zeropage_mtx, "pvo zero page",
+ NULL, MTX_DEF);
+ }
+ mtx_lock(&moea_pvo_zeropage_mtx);
moea_pa_map(moea_pvo_zeropage, pa, NULL, NULL);
va = (caddr_t)PVO_VADDR(moea_pvo_zeropage);
} else {
@@ -1001,8 +1006,10 @@ moea_zero_page(mmu_t mmu, vm_page_t m)
bzero(va, PAGE_SIZE);
- if (pa >= SEGMENT_LENGTH)
+ if (pa >= SEGMENT_LENGTH) {
moea_pa_unmap(moea_pvo_zeropage, NULL, NULL);
+ mtx_unlock(&moea_pvo_zeropage_mtx);
+ }
}
void
@@ -1014,8 +1021,12 @@ moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
if (pa < SEGMENT_LENGTH) {
va = (caddr_t) pa;
} else if (moea_initialized) {
- if (moea_pvo_zeropage == NULL)
+ if (moea_pvo_zeropage == NULL) {
moea_pvo_zeropage = moea_rkva_alloc(mmu);
+ mtx_init(&moea_pvo_zeropage_mtx, "pvo zero page",
+ NULL, MTX_DEF);
+ }
+ mtx_lock(&moea_pvo_zeropage_mtx);
moea_pa_map(moea_pvo_zeropage, pa, NULL, NULL);
va = (caddr_t)PVO_VADDR(moea_pvo_zeropage);
} else {
@@ -1024,19 +1035,17 @@ moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
bzero(va + off, size);
- if (pa >= SEGMENT_LENGTH)
+ if (pa >= SEGMENT_LENGTH) {
moea_pa_unmap(moea_pvo_zeropage, NULL, NULL);
+ mtx_unlock(&moea_pvo_zeropage_mtx);
+ }
}
void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{
- /* XXX this is called outside of Giant, is moea_zero_page safe? */
- /* XXX maybe have a dedicated mapping for this to avoid the problem? */
- mtx_lock(&Giant);
moea_zero_page(mmu, m);
- mtx_unlock(&Giant);
}
/*
OpenPOWER on IntegriCloud