summary | refs | log | tree | commit | diff | stats
path: root/sys/powerpc
diff options
context:
space:
mode:
author: nwhitehorn <nwhitehorn@FreeBSD.org> 2010-10-01 18:59:30 +0000
committer: nwhitehorn <nwhitehorn@FreeBSD.org> 2010-10-01 18:59:30 +0000
commit: 92b2ae5c12fa4b3bf4fbd8fee9b79f0be3043371 (patch)
tree: a0e1de63a6deba1750e036100a7f00374ed57ebb /sys/powerpc
parent: e6e606670faf6035037421bce732134b55159061 (diff)
download: FreeBSD-src-92b2ae5c12fa4b3bf4fbd8fee9b79f0be3043371.zip
download: FreeBSD-src-92b2ae5c12fa4b3bf4fbd8fee9b79f0be3043371.tar.gz
Fix pmap_page_set_memattr() behavior in the presence of fictitious pages
by just caching the mode for later use by pmap_enter(), following amd64. While here, correct some mismerges from mmu_oea64 -> mmu_oea and clean up some dead code found while fixing the fictitious page behavior.
Diffstat (limited to 'sys/powerpc')
-rw-r--r--  sys/powerpc/aim/mmu_oea.c    32
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c  30
2 files changed, 20 insertions, 42 deletions
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index e832037..dad3eda 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -221,8 +221,6 @@ u_int moea_pteg_mask;
struct pvo_head *moea_pvo_table; /* pvo entries by pteg index */
struct pvo_head moea_pvo_kunmanaged =
LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged); /* list of unmanaged pages */
-struct pvo_head moea_pvo_unmanaged =
- LIST_HEAD_INITIALIZER(moea_pvo_unmanaged); /* list of unmanaged pages */
uma_zone_t moea_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t moea_mpvo_zone; /* zone for pvo entries for managed pages */
@@ -463,22 +461,6 @@ va_to_pteg(u_int sr, vm_offset_t addr)
}
static __inline struct pvo_head *
-pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
-{
- struct vm_page *pg;
-
- pg = PHYS_TO_VM_PAGE(pa);
-
- if (pg_p != NULL)
- *pg_p = pg;
-
- if (pg == NULL)
- return (&moea_pvo_unmanaged);
-
- return (&pg->md.mdpg_pvoh);
-}
-
-static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{
@@ -919,6 +901,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
struct vm_page m;
m.phys_addr = translations[i].om_pa + off;
+ m.md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
m.oflags = VPO_BUSY;
PMAP_LOCK(&ofw_pmap);
moea_enter_locked(&ofw_pmap,
@@ -1168,7 +1151,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
}
- pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), VM_MEMATTR_DEFAULT);
+ pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
if (prot & VM_PROT_WRITE) {
pte_lo |= PTE_BW;
@@ -1450,16 +1433,23 @@ void
moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
struct pvo_entry *pvo;
+ struct pvo_head *pvo_head;
struct pte *pt;
pmap_t pmap;
u_int lo;
+ if (m->flags & PG_FICTITIOUS) {
+ m->md.mdpg_cache_attrs = ma;
+ return;
+ }
+
vm_page_lock_queues();
+ pvo_head = vm_page_to_pvoh(m);
lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
- LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
+
+ LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
- mtx_lock(&moea_table_mutex);
pt = moea_pvo_to_pte(pvo, -1);
pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
pvo->pvo_pte.pte.pte_lo |= lo;
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index ecd8f6a..0eef6a5 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -298,11 +298,8 @@ u_int moea64_pteg_mask;
* PVO data.
*/
struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */
-/* lists of unmanaged pages */
-struct pvo_head moea64_pvo_kunmanaged =
+struct pvo_head moea64_pvo_kunmanaged = /* list of unmanaged pages */
LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);
-struct pvo_head moea64_pvo_unmanaged =
- LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged);
uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */
@@ -495,22 +492,6 @@ va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
}
static __inline struct pvo_head *
-pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
-{
- struct vm_page *pg;
-
- pg = PHYS_TO_VM_PAGE(pa);
-
- if (pg_p != NULL)
- *pg_p = pg;
-
- if (pg == NULL)
- return (&moea64_pvo_unmanaged);
-
- return (&pg->md.mdpg_pvoh);
-}
-
-static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{
@@ -1917,13 +1898,20 @@ void
moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
struct pvo_entry *pvo;
+ struct pvo_head *pvo_head;
struct lpte *pt;
pmap_t pmap;
uint64_t lo;
+ if (m->flags & PG_FICTITIOUS) {
+ m->md.mdpg_cache_attrs = ma;
+ return;
+ }
+
vm_page_lock_queues();
+ pvo_head = vm_page_to_pvoh(m);
lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
- LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
+ LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
LOCK_TABLE();
OpenPOWER on IntegriCloud