author     alc <alc@FreeBSD.org>  2005-06-10 03:33:36 +0000
committer  alc <alc@FreeBSD.org>  2005-06-10 03:33:36 +0000
commit     2d109601cbaeb90e93aad0948a9367a78f196f9d (patch)
tree       1fda0878f37dc5e54399f30ad5588c0876f46516 /sys
parent     5273b0bf9faa803d4d586ba6d8c03c9d340e85c9 (diff)
Introduce a procedure, pmap_page_init(), that initializes the
vm_page's machine-dependent fields.  Use this function in
vm_pageq_add_new_page() so that the vm_page's machine-dependent and
machine-independent fields are initialized at the same time.

Remove code from pmap_init() for initializing the vm_page's
machine-dependent fields.

Remove stale comments from pmap_init().

Eliminate the Boolean variable pmap_initialized from the alpha, amd64,
i386, and ia64 pmap implementations.  Its use is no longer required
because of the above changes and earlier changes that result in
physical memory that is being mapped at initialization time being
mapped without pv entries.

Tested by: cognet, kensmith, marcel
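For illustration, here is a minimal, self-contained sketch (not the
committed code) of the pattern this change establishes: the
machine-independent page setup calls a machine-dependent hook, so both
halves of the vm_page are initialized together.  The struct layouts and
the vm_page_setup() caller below are simplified stand-ins modeled on
the i386/amd64 md_page and on vm_pageq_add_new_page() as shown in the
diff.

#include <sys/queue.h>

struct pv_entry;			/* opaque here; pmap-private */

/* Simplified machine-dependent per-page state (i386/amd64-style). */
struct md_page {
	TAILQ_HEAD(, pv_entry) pv_list;
	int		pv_list_count;
};

/* Simplified vm_page: machine-independent fields plus the MD part. */
struct vm_page {
	unsigned long	phys_addr;	/* machine-independent */
	int		flags;		/* machine-independent */
	struct md_page	md;		/* machine-dependent */
};
typedef struct vm_page *vm_page_t;

/*
 * MD hook: each pmap implementation supplies its own version; this one
 * mirrors the i386/amd64 variant from the diff.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_list_count = 0;
}

/*
 * MI caller, modeled on vm_pageq_add_new_page(): the MI and MD fields
 * are now initialized in one place, per page, at page-array setup time.
 */
void
vm_page_setup(vm_page_t m, unsigned long pa)
{

	m->phys_addr = pa;
	m->flags = 0;
	pmap_page_init(m);	/* previously a loop in pmap_init() */
}

The design point is that physical memory mapped at bootstrap time is
now mapped without pv entries, so the per-page MD state is always valid
by the time any pv-list code runs; that is why the !pmap_initialized
guards removed throughout the diff below are no longer needed.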
Diffstat (limited to 'sys')
-rw-r--r--  sys/alpha/alpha/pmap.c        | 50
-rw-r--r--  sys/amd64/amd64/pmap.c        | 46
-rw-r--r--  sys/arm/arm/pmap.c            | 29
-rw-r--r--  sys/i386/i386/pmap.c          | 46
-rw-r--r--  sys/ia64/ia64/pmap.c          | 53
-rw-r--r--  sys/powerpc/aim/mmu_oea.c     |  8
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c |  8
-rw-r--r--  sys/powerpc/powerpc/pmap.c    |  8
-rw-r--r--  sys/sparc64/sparc64/pmap.c    | 23
-rw-r--r--  sys/vm/pmap.h                 |  1
-rw-r--r--  sys/vm/vm_pageq.c             |  1
11 files changed, 124 insertions, 149 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index b88b84f..dab8df4 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -302,7 +302,6 @@ struct pmap kernel_pmap_store;
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
-static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static int nklev3, nklev2;
vm_offset_t kernel_vm_end;
@@ -554,29 +553,24 @@ pmap_uses_prom_console()
}
/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+}
+
+/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
- * pmap_init has been enhanced to support in a fairly consistant
- * way, discontiguous physical memory.
*/
void
pmap_init(void)
{
- int i;
-
- /*
- * Allocate memory for random pmap data structures. Includes the
- * pv_head_table.
- */
-
- for(i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
-
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- }
/*
* init the pv free list
@@ -584,11 +578,6 @@ pmap_init(void)
pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
uma_prealloc(pvzone, MINPV);
-
- /*
- * Now it is safe to enable pv_table recording.
- */
- pmap_initialized = TRUE;
}
/*
@@ -1516,7 +1505,7 @@ pmap_remove_all(vm_page_t m)
* XXX this makes pmap_page_protect(NONE) illegal for non-managed
* pages!
*/
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ if (m->flags & PG_FICTITIOUS) {
panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
}
#endif
@@ -1745,8 +1734,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
- if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, mpte, m);
managed |= PG_MANAGED;
}
@@ -2056,7 +2044,7 @@ pmap_page_exists_quick(pmap, m)
pv_entry_t pv;
int loops = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return FALSE;
/*
@@ -2162,7 +2150,7 @@ pmap_changebit(vm_page_t m, int bit, boolean_t setem)
pt_entry_t *pte;
int changed;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS) ||
+ if ((m->flags & PG_FICTITIOUS) ||
(!setem && bit == (PG_UWE|PG_KWE) &&
(m->flags & PG_WRITEABLE) == 0))
return;
@@ -2247,7 +2235,7 @@ pmap_ts_referenced(vm_page_t m)
pt_entry_t *pte;
int count;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return 0;
/*
@@ -2285,7 +2273,7 @@ pmap_is_modified(vm_page_t m)
boolean_t rv;
rv = FALSE;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rv);
/*
@@ -2335,7 +2323,7 @@ pmap_clear_modify(vm_page_t m)
pv_entry_t pv;
pt_entry_t *pte;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return;
/*
@@ -2364,7 +2352,7 @@ pmap_clear_reference(vm_page_t m)
pv_entry_t pv;
pt_entry_t *pte;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return;
/*
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 3f8981e..507e09c 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -165,7 +165,6 @@ vm_paddr_t avail_start; /* PA of first available physical page */
vm_paddr_t avail_end; /* PA of last available physical page */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
-static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static int nkpt;
static int ndmpdp;
@@ -531,29 +530,24 @@ pmap_bootstrap(firstaddr)
}
/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+}
+
+/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
- * pmap_init has been enhanced to support in a fairly consistant
- * way, discontiguous physical memory.
*/
void
pmap_init(void)
{
- int i;
-
- /*
- * Allocate memory for random pmap data structures. Includes the
- * pv_head_table.
- */
-
- for(i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
-
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- }
/*
* init the pv free list
@@ -561,11 +555,6 @@ pmap_init(void)
pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
uma_prealloc(pvzone, MINPV);
-
- /*
- * Now it is safe to enable pv_table recording.
- */
- pmap_initialized = TRUE;
}
/*
@@ -1675,7 +1664,7 @@ pmap_remove_all(vm_page_t m)
/*
* XXX This makes pmap_remove_all() illegal for non-managed pages!
*/
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ if (m->flags & PG_FICTITIOUS) {
panic("pmap_remove_all: illegal for unmanaged page, va: 0x%lx",
VM_PAGE_TO_PHYS(m));
}
@@ -1955,8 +1944,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
- if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
pa |= PG_MANAGED;
}
@@ -2449,7 +2437,7 @@ pmap_page_exists_quick(pmap, m)
pv_entry_t pv;
int loops = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return FALSE;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -2567,7 +2555,7 @@ pmap_is_modified(vm_page_t m)
boolean_t rv;
rv = FALSE;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rv);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -2628,7 +2616,7 @@ pmap_clear_ptes(vm_page_t m, long bit)
register pv_entry_t pv;
pt_entry_t pbits, *pte;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS) ||
+ if ((m->flags & PG_FICTITIOUS) ||
(bit == PG_RW && (m->flags & PG_WRITEABLE) == 0))
return;
@@ -2713,7 +2701,7 @@ pmap_ts_referenced(vm_page_t m)
pt_entry_t v;
int rtval = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rtval);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index f7d80d8..e98e2ba 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -1927,32 +1927,27 @@ pmap_pinit0(struct pmap *pmap)
bcopy(kernel_pmap, pmap, sizeof(*pmap));
}
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+}
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
- * pmap_init has been enhanced to support in a fairly consistant
- * way, discontiguous physical memory.
*/
void
pmap_init(void)
{
- int i;
PDEBUG(1, printf("pmap_init: phys_start = %08x\n"));
- /*
- * Allocate memory for random pmap data structures. Includes the
- * pv_head_table.
- */
-
- for(i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
-
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- }
/*
* init the pv free list
@@ -3156,7 +3151,7 @@ pmap_remove_all(vm_page_t m)
* XXX this makes pmap_page_protect(NONE) illegal for non-managed
* pages!
*/
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ if (m->flags & PG_FICTITIOUS) {
panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
}
#endif
@@ -4331,7 +4326,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
int loops = 0;
int s;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (FALSE);
s = splvm();
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 97020e2..5400006 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -190,7 +190,6 @@ static struct mtx allpmaps_lock;
vm_paddr_t avail_end; /* PA of last available physical page */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
-static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
int pgeflag = 0; /* PG_G or-in */
int pseflag = 0; /* PG_PS or-in */
@@ -443,6 +442,17 @@ pmap_set_pg(void)
}
}
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+}
+
#ifdef PAE
static MALLOC_DEFINE(M_PMAPPDPT, "pmap", "pmap pdpt");
@@ -460,26 +470,10 @@ pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
- * pmap_init has been enhanced to support in a fairly consistant
- * way, discontiguous physical memory.
*/
void
pmap_init(void)
{
- int i;
-
- /*
- * Allocate memory for random pmap data structures. Includes the
- * pv_head_table.
- */
-
- for(i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
-
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- }
/*
* init the pv free list
@@ -494,11 +488,6 @@ pmap_init(void)
UMA_ZONE_VM | UMA_ZONE_NOFREE);
uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
#endif
-
- /*
- * Now it is safe to enable pv_table recording.
- */
- pmap_initialized = TRUE;
}
/*
@@ -1714,7 +1703,7 @@ pmap_remove_all(vm_page_t m)
/*
* XXX This makes pmap_remove_all() illegal for non-managed pages!
*/
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ if (m->flags & PG_FICTITIOUS) {
panic("pmap_remove_all: illegal for unmanaged page, va: 0x%x",
VM_PAGE_TO_PHYS(m));
}
@@ -1997,8 +1986,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
- if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
pa |= PG_MANAGED;
}
@@ -2525,7 +2513,7 @@ pmap_page_exists_quick(pmap, m)
pv_entry_t pv;
int loops = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return FALSE;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -2645,7 +2633,7 @@ pmap_is_modified(vm_page_t m)
boolean_t rv;
rv = FALSE;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rv);
sched_pin();
@@ -2708,7 +2696,7 @@ pmap_clear_ptes(vm_page_t m, int bit)
register pv_entry_t pv;
pt_entry_t pbits, *pte;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS) ||
+ if ((m->flags & PG_FICTITIOUS) ||
(bit == PG_RW && (m->flags & PG_WRITEABLE) == 0))
return;
@@ -2800,7 +2788,7 @@ pmap_ts_referenced(vm_page_t m)
pt_entry_t v;
int rtval = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rtval);
sched_pin();
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 5a14f04..647a7c6 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -154,7 +154,6 @@ struct pmap kernel_pmap_store;
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
-static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
vm_offset_t vhpt_base, vhpt_size;
struct mtx pmap_vhptmutex;
@@ -444,29 +443,24 @@ pmap_bootstrap()
}
/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+}
+
+/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
- * pmap_init has been enhanced to support in a fairly consistant
- * way, discontiguous physical memory.
*/
void
pmap_init(void)
{
- int i;
-
- /*
- * Allocate memory for random pmap data structures. Includes the
- * pv_head_table.
- */
-
- for(i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
-
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- }
/*
* Init the pv free list and the PTE free list.
@@ -478,11 +472,6 @@ pmap_init(void)
ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
uma_prealloc(ptezone, MINPV);
-
- /*
- * Now it is safe to enable pv_table recording.
- */
- pmap_initialized = TRUE;
}
/*
@@ -1391,7 +1380,7 @@ pmap_remove_all(vm_page_t m)
* XXX this makes pmap_page_protect(NONE) illegal for non-managed
* pages!
*/
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ if (m->flags & PG_FICTITIOUS) {
panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
}
#endif
@@ -1572,8 +1561,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* Enter on the PV list if part of our managed memory.
*/
- if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
managed = TRUE;
}
@@ -1648,8 +1636,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
/*
* Enter on the PV list since its part of our managed memory.
*/
- if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
managed = TRUE;
}
@@ -1809,7 +1796,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
int loops = 0;
int s;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return FALSE;
s = splvm();
@@ -1932,7 +1919,7 @@ pmap_ts_referenced(vm_page_t m)
pv_entry_t pv;
int count = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return 0;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -1965,7 +1952,7 @@ pmap_is_referenced(vm_page_t m)
{
pv_entry_t pv;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return FALSE;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -1996,7 +1983,7 @@ pmap_is_modified(vm_page_t m)
boolean_t rv;
rv = FALSE;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rv);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2041,7 +2028,7 @@ pmap_clear_modify(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2071,7 +2058,7 @@ pmap_clear_reference(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 8c6808c..2ea798d 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1136,6 +1136,14 @@ pmap_growkernel(vm_offset_t addr)
{
}
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+}
+
void
pmap_init(void)
{
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index 8c6808c..2ea798d 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -1136,6 +1136,14 @@ pmap_growkernel(vm_offset_t addr)
{
}
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+}
+
void
pmap_init(void)
{
diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c
index 8c6808c..2ea798d 100644
--- a/sys/powerpc/powerpc/pmap.c
+++ b/sys/powerpc/powerpc/pmap.c
@@ -1136,6 +1136,14 @@ pmap_growkernel(vm_offset_t addr)
{
}
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+}
+
void
pmap_init(void)
{
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 884b476..8d62058 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -555,6 +555,19 @@ pmap_bootstrap_alloc(vm_size_t size)
}
/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.tte_list);
+ m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
+ m->md.flags = 0;
+ m->md.pmap = NULL;
+}
+
+/*
* Initialize the pmap module.
*/
void
@@ -565,16 +578,6 @@ pmap_init(void)
int result;
int i;
- for (i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
-
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.tte_list);
- m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
- m->md.flags = 0;
- m->md.pmap = NULL;
- }
-
for (i = 0; i < translations_size; i++) {
addr = translations[i].om_start;
size = translations[i].om_size;
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 6c9df32..4eb8f5a 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -113,6 +113,7 @@ vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
+void pmap_page_init(vm_page_t m);
void pmap_page_protect(vm_page_t m, vm_prot_t prot);
void pmap_pinit(pmap_t);
void pmap_pinit0(pmap_t);
diff --git a/sys/vm/vm_pageq.c b/sys/vm/vm_pageq.c
index 7cdf86b..1ed1d3e 100644
--- a/sys/vm/vm_pageq.c
+++ b/sys/vm/vm_pageq.c
@@ -143,6 +143,7 @@ vm_pageq_add_new_page(vm_paddr_t pa)
m->phys_addr = pa;
m->flags = 0;
m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
+ pmap_page_init(m);
vm_pageq_enqueue(m->pc + PQ_FREE, m);
return (m);
}