diff options
author | peter <peter@FreeBSD.org> | 2002-07-08 04:24:26 +0000 |
---|---|---|
committer | peter <peter@FreeBSD.org> | 2002-07-08 04:24:26 +0000 |
commit | 62e40d1277fdd1a346ff9f3f25a2ea493d6361ae (patch) | |
tree | 03dd4cf988623663014cdcc79fa1f7ab1fb086c1 /sys/powerpc | |
parent | a01296978c4bac0e78281b2889451de806ff5009 (diff) | |
download | FreeBSD-src-62e40d1277fdd1a346ff9f3f25a2ea493d6361ae.zip FreeBSD-src-62e40d1277fdd1a346ff9f3f25a2ea493d6361ae.tar.gz |
Add a special page zero entry point intended to be called via the single
threaded VM pagezero kthread outside of Giant. For some platforms, this
is really easy since it can just use the direct mapped region. For others,
IPI sending is involved or there are other issues, so grab Giant when
needed.
We still have preemption issues to deal with, but Alan Cox has an
interesting suggestion on how to minimize the problem on x86.
Use Luigi's hack for preserving the (lack of) priority.
Turn the idle zeroing back on since it can now actually do something useful
outside of Giant in many cases.
Diffstat (limited to 'sys/powerpc')
-rw-r--r-- | sys/powerpc/aim/mmu_oea.c | 11 |
-rw-r--r-- | sys/powerpc/powerpc/mmu_oea.c | 11 |
-rw-r--r-- | sys/powerpc/powerpc/pmap.c | 11 |
3 files changed, 33 insertions(+), 0 deletions(-)
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c index 817f7f1..187e4b3 100644 --- a/sys/powerpc/aim/mmu_oea.c +++ b/sys/powerpc/aim/mmu_oea.c @@ -909,6 +909,17 @@ pmap_zero_page_area(vm_page_t m, int off, int size) TODO; } +void +pmap_zero_page_idle(vm_page_t m) +{ + + /* XXX this is called outside of Giant, is pmap_zero_page safe? */ + /* XXX maybe have a dedicated mapping for this to avoid the problem? */ + mtx_lock(&Giant); + pmap_zero_page(m); + mtx_unlock(&Giant); +} + /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c index 817f7f1..187e4b3 100644 --- a/sys/powerpc/powerpc/mmu_oea.c +++ b/sys/powerpc/powerpc/mmu_oea.c @@ -909,6 +909,17 @@ pmap_zero_page_area(vm_page_t m, int off, int size) TODO; } +void +pmap_zero_page_idle(vm_page_t m) +{ + + /* XXX this is called outside of Giant, is pmap_zero_page safe? */ + /* XXX maybe have a dedicated mapping for this to avoid the problem? */ + mtx_lock(&Giant); + pmap_zero_page(m); + mtx_unlock(&Giant); +} + /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c index 817f7f1..187e4b3 100644 --- a/sys/powerpc/powerpc/pmap.c +++ b/sys/powerpc/powerpc/pmap.c @@ -909,6 +909,17 @@ pmap_zero_page_area(vm_page_t m, int off, int size) TODO; } +void +pmap_zero_page_idle(vm_page_t m) +{ + + /* XXX this is called outside of Giant, is pmap_zero_page safe? */ + /* XXX maybe have a dedicated mapping for this to avoid the problem? */ + mtx_lock(&Giant); + pmap_zero_page(m); + mtx_unlock(&Giant); +} + /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page |