author     marcel <marcel@FreeBSD.org>  2004-08-09 20:44:41 +0000
committer  marcel <marcel@FreeBSD.org>  2004-08-09 20:44:41 +0000
commit     1e00fe7b057fa9e2896700e40f006617e0d70e6c (patch)
tree       8cfcefc4b6f79b35021de3d83c333ca1875fd57f /sys/ia64
parent     38d3d854febde45875e709b943a720af3ac601da (diff)
Better preserve the original protection for the mappings we maintain.
The hardware always gives read access for privilege level 0, which means that
we cannot use the hardware access rights and privilege level in the PTE to
test whether there's a change in protection. So, we save the original
vm_prot_t in the PTE as well. Add pmap_pte_prot() to set the proper access
rights and privilege level on the PTE given a pmap and the requested
protection.

The above allows us to compare the protection in pmap_extract_and_hold(),
which was missing. While in pmap_extract_and_hold(), add pmap locking.

While here, clean up most (i.e. all but one) PTE macros we inherited from
alpha. They were either unused, used inconsistently, badly named or simply
weren't beneficial. We save the wired and managed state of the PTE in
distinct (bit) fields.

While in pte.h, s/u_int64_t/uint64_t/g

pmap locking obtained from: alc@
feedback & review by: alc@
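For illustration only, here is a small standalone sketch of the idea described
above: the machine-independent protection is kept verbatim in software-only
PTE bits, and the ia64 access rights are derived from it, so protection checks
can compare pte_prot directly instead of the hardware ar/pl fields. This is
not the committed code; the VM_PROT_* and PTE_AR_* values and the sw_pte
structure below are simplified stand-ins (see the real hunks further down).

    #include <stdint.h>
    #include <stdio.h>

    #define VM_PROT_NONE    0x0     /* stand-ins for the MI protection bits */
    #define VM_PROT_READ    0x1
    #define VM_PROT_WRITE   0x2
    #define VM_PROT_EXECUTE 0x4
    #define VM_PROT_ALL     (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)

    #define PTE_AR_R        0       /* simplified access-rights encodings */
    #define PTE_AR_RX       1
    #define PTE_AR_RW       2
    #define PTE_AR_RWX      3

    struct sw_pte {                 /* simplified PTE: the new software bits
                                       plus the hardware access-rights field */
            uint64_t pte_w    :1;   /* wired */
            uint64_t pte_m    :1;   /* managed */
            uint64_t pte_prot :3;   /* original vm_prot_t */
            uint64_t pte_ar   :3;   /* derived hardware access rights */
    };

    static void
    sw_pte_prot(struct sw_pte *pte, int prot)
    {
            static const int prot2ar[4] = {
                    PTE_AR_R,       /* VM_PROT_NONE (or read-only) */
                    PTE_AR_RW,      /* VM_PROT_WRITE */
                    PTE_AR_RX,      /* VM_PROT_EXECUTE */
                    PTE_AR_RWX      /* VM_PROT_WRITE | VM_PROT_EXECUTE */
            };

            pte->pte_prot = prot;   /* saved so later checks need not trust ar/pl */
            pte->pte_ar = prot2ar[(prot & VM_PROT_ALL) >> 1];
    }

    int
    main(void)
    {
            struct sw_pte pte = { 0 };
            int want = VM_PROT_READ | VM_PROT_WRITE;

            sw_pte_prot(&pte, want);
            /* The kind of check pmap_extract_and_hold() now performs: */
            printf("prot ok: %d\n", ((int)pte.pte_prot & want) == want);
            return (0);
    }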
Diffstat (limited to 'sys/ia64')
-rw-r--r--  sys/ia64/ia64/pmap.c     263
-rw-r--r--  sys/ia64/include/pte.h    99
2 files changed, 140 insertions, 222 deletions
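The net effect on callers is a two-step idiom: set the protection on the PTE
first, then install the mapping with explicit wired/managed flags. Excerpting
from the pmap_enter() hunk below:

    pmap_pte_prot(pmap, pte, prot);
    pmap_set_pte(pte, va, pa, wired, managed);

This replaces the old single pmap_set_pte() call that packed PTE_IG_* flags
and the pl/ar values into its arguments.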
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 8dff93d..8525dcb 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -124,52 +124,15 @@ MALLOC_DEFINE(M_PMAP, "PMAP", "PMAP Structures");
#define PMAP_SHPGPERPROC 200
#endif
-#if defined(DIAGNOSTIC)
-#define PMAP_DIAGNOSTIC
-#endif
-
#define MINPV 2048 /* Preallocate at least this many */
-#if 0
-#define PMAP_DIAGNOSTIC
-#define PMAP_DEBUG
-#endif
-
-#if !defined(PMAP_DIAGNOSTIC)
+#if !defined(DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif
-/*
- * Get PDEs and PTEs for user/kernel address space
- */
-#define pmap_pte_w(pte) ((pte)->pte_ig & PTE_IG_WIRED)
-#define pmap_pte_managed(pte) ((pte)->pte_ig & PTE_IG_MANAGED)
-#define pmap_pte_v(pte) ((pte)->pte_p)
#define pmap_pte_pa(pte) (((pte)->pte_ppn) << 12)
-#define pmap_pte_prot(pte) (((pte)->pte_ar << 2) | (pte)->pte_pl)
-
-#define pmap_pte_set_w(pte, v) ((v)?((pte)->pte_ig |= PTE_IG_WIRED) \
- :((pte)->pte_ig &= ~PTE_IG_WIRED))
-#define pmap_pte_set_prot(pte, v) do { \
- (pte)->pte_ar = v >> 2; \
- (pte)->pte_pl = v & 3; \
-} while (0)
-
-/*
- * Given a map and a machine independent protection code,
- * convert to an ia64 protection code.
- */
-#define pte_prot(m, p) (protection_codes[m == kernel_pmap ? 0 : 1][p])
-#define pte_prot_pl(m, p) (pte_prot(m, p) & 3)
-#define pte_prot_ar(m, p) (pte_prot(m, p) >> 2)
-int protection_codes[2][8];
-
-/*
- * Return non-zero if this pmap is currently active
- */
-#define pmap_isactive(pmap) (pmap->pm_active)
/*
* Statically allocated kernel pmap
@@ -244,7 +207,6 @@ SYSCTL_INT(_vm_stats_vhpt, OID_AUTO, resident, CTLFLAG_RD,
static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(void);
-static void ia64_protection_init(void);
static pmap_t pmap_install(pmap_t);
static void pmap_invalidate_all(pmap_t pmap);
@@ -438,11 +400,6 @@ pmap_bootstrap()
virtual_end = VM_MAX_KERNEL_ADDRESS;
/*
- * Initialize protection array.
- */
- ia64_protection_init();
-
- /*
* Initialize the kernel pmap (which is statically allocated).
*/
PMAP_LOCK_INIT(kernel_pmap);
@@ -998,7 +955,7 @@ pmap_extract(pmap_t pmap, vm_offset_t va)
PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
pte = pmap_find_vhpt(va);
- if (pte != NULL && pmap_pte_v(pte))
+ if (pte != NULL && pte->pte_p)
pa = pmap_pte_pa(pte);
pmap_install(oldpmap);
PMAP_UNLOCK(pmap);
@@ -1015,18 +972,24 @@ pmap_extract(pmap_t pmap, vm_offset_t va)
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
- vm_paddr_t pa;
+ struct ia64_lpte *pte;
+ pmap_t oldpmap;
vm_page_t m;
m = NULL;
- mtx_lock(&Giant);
- if ((pa = pmap_extract(pmap, va)) != 0) {
- m = PHYS_TO_VM_PAGE(pa);
- vm_page_lock_queues();
+ if (pmap == NULL)
+ return (m);
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+ oldpmap = pmap_install(pmap);
+ pte = pmap_find_vhpt(va);
+ if (pte != NULL && pte->pte_p && (pte->pte_prot & prot) == prot) {
+ m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
vm_page_hold(m);
- vm_page_unlock_queues();
}
- mtx_unlock(&Giant);
+ vm_page_unlock_queues();
+ pmap_install(oldpmap);
+ PMAP_UNLOCK(pmap);
return (m);
}
@@ -1085,31 +1048,48 @@ pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va)
pte->pte_p = 0;
}
+static PMAP_INLINE void
+pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot)
+{
+ static int prot2ar[4] = {
+ PTE_AR_R, /* VM_PROT_NONE */
+ PTE_AR_RW, /* VM_PROT_WRITE */
+ PTE_AR_RX, /* VM_PROT_EXECUTE */
+ PTE_AR_RWX /* VM_PROT_WRITE|VM_PROT_EXECUTE */
+ };
+
+ pte->pte_prot = prot;
+ pte->pte_pl = (prot == VM_PROT_NONE || pm == kernel_pmap)
+ ? PTE_PL_KERN : PTE_PL_USER;
+ pte->pte_ar = prot2ar[(prot & VM_PROT_ALL) >> 1];
+}
+
/*
* Set a pte to contain a valid mapping and enter it in the VHPT. If
* the pte was orginally valid, then its assumed to already be in the
* VHPT.
+ This function does not set the protection bits. It's expected
+ * that those have been set correctly prior to calling this function.
*/
static void
pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa,
- int ig, int pl, int ar)
+ boolean_t wired, boolean_t managed)
{
int wasvalid = pte->pte_p;
pte->pte_p = 1;
pte->pte_ma = PTE_MA_WB;
- if (ig & PTE_IG_MANAGED) {
+ if (managed) {
pte->pte_a = 0;
pte->pte_d = 0;
} else {
pte->pte_a = 1;
pte->pte_d = 1;
}
- pte->pte_pl = pl;
- pte->pte_ar = ar;
pte->pte_ppn = pa >> 12;
pte->pte_ed = 0;
- pte->pte_ig = ig;
+ pte->pte_w = (wired) ? 1 : 0;
+ pte->pte_m = (managed) ? 1 : 0;
pte->pte_ps = PAGE_SHIFT;
pte->pte_key = 0;
@@ -1162,11 +1142,11 @@ pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va,
*/
pte->pte_p = 0;
- if (pte->pte_ig & PTE_IG_WIRED)
+ if (pte->pte_w)
pmap->pm_stats.wired_count -= 1;
pmap->pm_stats.resident_count -= 1;
- if (pte->pte_ig & PTE_IG_MANAGED) {
+ if (pte->pte_m) {
m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
if (pte->pte_d)
if (pmap_track_modified(va))
@@ -1212,16 +1192,15 @@ pmap_kextract(vm_offset_t va)
pte = pmap_find_kpte(va);
if (!pte->pte_p)
return (0);
- return ((pte->pte_ppn << 12) | (va & PAGE_MASK));
+ return (pmap_pte_pa(pte) | (va & PAGE_MASK));
}
/*
- * Add a list of wired pages to the kva
- * this routine is only used for temporary
- * kernel mappings that do not need to have
- * page modification or references recorded.
- * Note that old mappings are simply written
- * over. The page *must* be wired.
+ * Add a list of wired pages to the kva this routine is only used for
+ * temporary kernel mappings that do not need to have page modification
+ * or references recorded. Note that old mappings are simply written
+ * over. The page is effectively wired, but it's customary to not have
+ * the PTE reflect that, nor update statistics.
*/
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
@@ -1234,8 +1213,8 @@ pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
int wasvalid;
pte = pmap_find_kpte(tva);
wasvalid = pte->pte_p;
- pmap_set_pte(pte, tva, VM_PAGE_TO_PHYS(m[i]),
- 0, PTE_PL_KERN, PTE_AR_RWX);
+ pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
+ pmap_set_pte(pte, tva, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
if (wasvalid)
ia64_ptc_g(tva, PAGE_SHIFT << 2);
}
@@ -1259,7 +1238,8 @@ pmap_qremove(vm_offset_t va, int count)
}
/*
- * Add a wired page to the kva.
+ * Add a wired page to the kva. As for pmap_qenter(), it's customary
+ * to not have the PTE reflect that, nor update statistics.
*/
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
@@ -1269,7 +1249,8 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
pte = pmap_find_kpte(va);
wasvalid = pte->pte_p;
- pmap_set_pte(pte, va, pa, 0, PTE_PL_KERN, PTE_AR_RWX);
+ pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
+ pmap_set_pte(pte, va, pa, FALSE, FALSE);
if (wasvalid)
ia64_ptc_g(va, PAGE_SHIFT << 2);
}
@@ -1404,7 +1385,7 @@ pmap_remove_all(vm_page_t m)
pv_entry_t pv;
int s;
-#if defined(PMAP_DIAGNOSTIC)
+#if defined(DIAGNOSTIC)
/*
* XXX this makes pmap_page_protect(NONE) illegal for non-managed
* pages!
@@ -1448,7 +1429,6 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
pmap_t oldpmap;
struct ia64_lpte *pte;
- int newprot;
if (pmap == NULL)
return;
@@ -1461,8 +1441,6 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
if (prot & VM_PROT_WRITE)
return;
- newprot = pte_prot(pmap, prot);
-
if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
panic("pmap_protect: unaligned addresses");
@@ -1479,8 +1457,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
continue;
}
- if (pmap_pte_prot(pte) != newprot) {
- if (pte->pte_ig & PTE_IG_MANAGED) {
+ if (pte->pte_prot != prot) {
+ if (pte->pte_m) {
vm_offset_t pa = pmap_pte_pa(pte);
vm_page_t m = PHYS_TO_VM_PAGE(pa);
if (pte->pte_d) {
@@ -1493,7 +1471,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
pte->pte_a = 0;
}
}
- pmap_pte_set_prot(pte, newprot);
+ pmap_pte_prot(pmap, pte, prot);
pmap_update_vhpt(pte, sva);
pmap_invalidate_page(pmap, sva);
}
@@ -1519,14 +1497,14 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
*/
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- boolean_t wired)
+ boolean_t wired)
{
pmap_t oldpmap;
vm_offset_t pa;
vm_offset_t opa;
struct ia64_lpte origpte;
struct ia64_lpte *pte;
- int managed;
+ boolean_t managed;
if (pmap == NULL)
return;
@@ -1536,7 +1514,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
oldpmap = pmap_install(pmap);
va &= ~PAGE_MASK;
-#ifdef PMAP_DIAGNOSTIC
+#ifdef DIAGNOSTIC
if (va > VM_MAX_KERNEL_ADDRESS)
panic("pmap_enter: toobig");
#endif
@@ -1559,7 +1537,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
opa = pmap_pte_pa(&origpte);
else
opa = 0;
- managed = 0;
+ managed = FALSE;
pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK;
@@ -1573,16 +1551,16 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* are valid mappings in them. Hence, if a user page is wired,
* the PT page will be also.
*/
- if (wired && ((origpte.pte_ig & PTE_IG_WIRED) == 0))
+ if (wired && !origpte.pte_w)
pmap->pm_stats.wired_count++;
- else if (!wired && (origpte.pte_ig & PTE_IG_WIRED))
+ else if (!wired && origpte.pte_w)
pmap->pm_stats.wired_count--;
/*
* We might be turning off write access to the page,
* so we go ahead and sense modify status.
*/
- if (origpte.pte_ig & PTE_IG_MANAGED) {
+ if (origpte.pte_m) {
if (origpte.pte_d && pmap_track_modified(va)) {
vm_page_t om;
om = PHYS_TO_VM_PAGE(opa);
@@ -1590,7 +1568,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
}
- managed = origpte.pte_ig & PTE_IG_MANAGED;
+ managed = (origpte.pte_m) ? TRUE : FALSE;
goto validate;
}
/*
@@ -1610,7 +1588,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (pmap_initialized &&
(m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
- managed |= PTE_IG_MANAGED;
+ managed = TRUE;
}
/*
@@ -1626,8 +1604,8 @@ validate:
* Now validate mapping with desired protection/wiring. This
* adds the pte to the VHPT if necessary.
*/
- pmap_set_pte(pte, va, pa, managed | (wired ? PTE_IG_WIRED : 0),
- pte_prot_pl(pmap, prot), pte_prot_ar(pmap, prot));
+ pmap_pte_prot(pmap, pte, prot);
+ pmap_set_pte(pte, va, pa, wired, managed);
/*
* if the mapping or permission bits are different, we need
@@ -1657,7 +1635,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{
struct ia64_lpte *pte;
pmap_t oldpmap;
- int managed;
+ boolean_t managed;
vm_page_lock_queues();
PMAP_LOCK(pmap);
@@ -1674,7 +1652,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
}
if (pte->pte_p)
goto reinstall;
- managed = 0;
+ managed = FALSE;
/*
* Enter on the PV list since its part of our managed memory.
@@ -1682,7 +1660,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
if (pmap_initialized &&
(m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
- managed |= PTE_IG_MANAGED;
+ managed = TRUE;
}
/*
@@ -1693,8 +1671,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
/*
* Initialise PTE with read-only protection and enter into VHPT.
*/
- pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), managed,
- PTE_PL_USER, PTE_AR_R);
+ pmap_pte_prot(pmap, pte, VM_PROT_READ);
+ pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
+
reinstall:
vm_page_unlock_queues();
pmap_install(oldpmap);
@@ -1742,16 +1721,16 @@ pmap_change_wiring(pmap, va, wired)
pte = pmap_find_vhpt(va);
KASSERT(pte != NULL, ("pte"));
- if (wired && !pmap_pte_w(pte))
+ if (wired && !pte->pte_w)
pmap->pm_stats.wired_count++;
- else if (!wired && pmap_pte_w(pte))
+ else if (!wired && pte->pte_w)
pmap->pm_stats.wired_count--;
/*
* Wiring is not a hardware characteristic so there is no need to
* invalidate TLB.
*/
- pmap_pte_set_w(pte, wired);
+ pte->pte_w = (wired) ? 1 : 0;
pmap_install(oldpmap);
PMAP_UNLOCK(pmap);
@@ -1903,7 +1882,7 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pte = pmap_find_vhpt(pv->pv_va);
KASSERT(pte != NULL, ("pte"));
- if (pte->pte_ig & PTE_IG_WIRED)
+ if (pte->pte_w)
continue;
pmap_remove_pte(pmap, pte, pv->pv_va, pv, 1);
@@ -1923,7 +1902,7 @@ void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
struct ia64_lpte *pte;
- pmap_t oldpmap;
+ pmap_t oldpmap, pmap;
pv_entry_t pv;
if ((prot & VM_PROT_WRITE) != 0)
@@ -1932,16 +1911,16 @@ pmap_page_protect(vm_page_t m, vm_prot_t prot)
if ((m->flags & PG_WRITEABLE) == 0)
return;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- int newprot = pte_prot(pv->pv_pmap, prot);
- PMAP_LOCK(pv->pv_pmap);
- oldpmap = pmap_install(pv->pv_pmap);
+ pmap = pv->pv_pmap;
+ PMAP_LOCK(pmap);
+ oldpmap = pmap_install(pmap);
pte = pmap_find_vhpt(pv->pv_va);
KASSERT(pte != NULL, ("pte"));
- pmap_pte_set_prot(pte, newprot);
+ pmap_pte_prot(pmap, pte, prot);
pmap_update_vhpt(pte, pv->pv_va);
- pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
+ pmap_invalidate_page(pmap, pv->pv_va);
pmap_install(oldpmap);
- PMAP_UNLOCK(pv->pv_pmap);
+ PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
} else {
@@ -2127,63 +2106,6 @@ pmap_clear_reference(vm_page_t m)
}
/*
- * Miscellaneous support routines follow
- */
-
-static void
-ia64_protection_init()
-{
- int prot, *kp, *up;
-
- kp = protection_codes[0];
- up = protection_codes[1];
-
- for (prot = 0; prot < 8; prot++) {
- switch (prot) {
- case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
- *kp++ = (PTE_AR_R << 2) | PTE_PL_KERN;
- *up++ = (PTE_AR_R << 2) | PTE_PL_KERN;
- break;
-
- case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
- *kp++ = (PTE_AR_X_RX << 2) | PTE_PL_KERN;
- *up++ = (PTE_AR_X_RX << 2) | PTE_PL_USER;
- break;
-
- case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
- *kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN;
- *up++ = (PTE_AR_RW << 2) | PTE_PL_USER;
- break;
-
- case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
- *kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN;
- *up++ = (PTE_AR_RWX << 2) | PTE_PL_USER;
- break;
-
- case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
- *kp++ = (PTE_AR_R << 2) | PTE_PL_KERN;
- *up++ = (PTE_AR_R << 2) | PTE_PL_USER;
- break;
-
- case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
- *kp++ = (PTE_AR_RX << 2) | PTE_PL_KERN;
- *up++ = (PTE_AR_RX << 2) | PTE_PL_USER;
- break;
-
- case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
- *kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN;
- *up++ = (PTE_AR_RW << 2) | PTE_PL_USER;
- break;
-
- case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
- *kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN;
- *up++ = (PTE_AR_RWX << 2) | PTE_PL_USER;
- break;
- }
- }
-}
-
-/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This
* routine is intended to be used for mapping device memory,
@@ -2227,12 +2149,12 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
if (!pte)
return 0;
- if (pmap_pte_v(pte)) {
+ if (pte->pte_p) {
vm_page_t m;
vm_offset_t pa;
val = MINCORE_INCORE;
- if ((pte->pte_ig & PTE_IG_MANAGED) == 0)
+ if (!pte->pte_m)
return val;
pa = pmap_pte_pa(pte);
@@ -2347,18 +2269,17 @@ static const char* psnames[] = {
static void
print_trs(int type)
{
- struct ia64_pal_result res;
- int i, maxtr;
+ struct ia64_pal_result res;
+ int i, maxtr;
struct {
struct ia64_pte pte;
struct ia64_itir itir;
- struct ia64_ifa ifa;
+ uint64_t ifa;
struct ia64_rr rr;
- } buf;
- static const char* manames[] = {
+ } buf;
+ static const char *manames[] = {
"WB", "bad", "bad", "bad",
"UC", "UCE", "WC", "NaT",
-
};
res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0);
@@ -2387,9 +2308,9 @@ print_trs(int type)
buf.pte.pte_ma = 0;
db_printf(
"%d %06x %013lx %013lx %4s %d %d %d %d %d %-3s %d %06x\n",
- buf.ifa.ifa_ig & 1,
+ (int)buf.ifa & 1,
buf.rr.rr_rid,
- buf.ifa.ifa_vpn,
+ buf.ifa >> 12,
buf.pte.pte_ppn,
psnames[buf.itir.itir_ps],
buf.pte.pte_ed,
diff --git a/sys/ia64/include/pte.h b/sys/ia64/include/pte.h
index 28d47c0..09b0b54c 100644
--- a/sys/ia64/include/pte.h
+++ b/sys/ia64/include/pte.h
@@ -70,79 +70,76 @@
#define PTE_AR_RWX_RW 6
#define PTE_AR_X_RX 7
-#define PTE_IG_WIRED 1
-#define PTE_IG_MANAGED 2
-
/*
* A short-format VHPT entry. Also matches the TLB insertion format.
*/
struct ia64_pte {
- u_int64_t pte_p :1; /* bits 0..0 */
- u_int64_t pte_rv1 :1; /* bits 1..1 */
- u_int64_t pte_ma :3; /* bits 2..4 */
- u_int64_t pte_a :1; /* bits 5..5 */
- u_int64_t pte_d :1; /* bits 6..6 */
- u_int64_t pte_pl :2; /* bits 7..8 */
- u_int64_t pte_ar :3; /* bits 9..11 */
- u_int64_t pte_ppn :38; /* bits 12..49 */
- u_int64_t pte_rv2 :2; /* bits 50..51 */
- u_int64_t pte_ed :1; /* bits 52..52 */
- u_int64_t pte_ig :11; /* bits 53..63 */
+ uint64_t pte_p :1; /* bit 0 */
+ uint64_t __rv1__ :1; /* bit 1 */
+ uint64_t pte_ma :3; /* bits 2..4 */
+ uint64_t pte_a :1; /* bit 5 */
+ uint64_t pte_d :1; /* bit 6 */
+ uint64_t pte_pl :2; /* bits 7..8 */
+ uint64_t pte_ar :3; /* bits 9..11 */
+ uint64_t pte_ppn :38; /* bits 12..49 */
+ uint64_t __rv2__ :2; /* bits 50..51 */
+ uint64_t pte_ed :1; /* bit 52 */
+ /* The following bits are ignored by the hardware. */
+ uint64_t pte_w :1; /* bit 53 */
+ uint64_t pte_m :1; /* bit 54 */
+ uint64_t pte_prot:3; /* bits 55..57 */
+ uint64_t __ig__ :6; /* bits 58..63 */
};
/*
* A long-format VHPT entry.
*/
struct ia64_lpte {
- u_int64_t pte_p :1; /* bits 0..0 */
- u_int64_t pte_rv1 :1; /* bits 1..1 */
- u_int64_t pte_ma :3; /* bits 2..4 */
- u_int64_t pte_a :1; /* bits 5..5 */
- u_int64_t pte_d :1; /* bits 6..6 */
- u_int64_t pte_pl :2; /* bits 7..8 */
- u_int64_t pte_ar :3; /* bits 9..11 */
- u_int64_t pte_ppn :38; /* bits 12..49 */
- u_int64_t pte_rv2 :2; /* bits 50..51 */
- u_int64_t pte_ed :1; /* bits 52..52 */
- u_int64_t pte_ig :11; /* bits 53..63 */
-
- u_int64_t pte_rv3 :2; /* bits 0..1 */
- u_int64_t pte_ps :6; /* bits 2..7 */
- u_int64_t pte_key :24; /* bits 8..31 */
- u_int64_t pte_rv4 :32; /* bits 32..63 */
-
- u_int64_t pte_tag; /* includes ti */
-
- u_int64_t pte_chain; /* pa of collision chain */
+ uint64_t pte_p :1; /* bit 0 */
+ uint64_t __rv1__ :1; /* bit 1 */
+ uint64_t pte_ma :3; /* bits 2..4 */
+ uint64_t pte_a :1; /* bit 5 */
+ uint64_t pte_d :1; /* bit 6 */
+ uint64_t pte_pl :2; /* bits 7..8 */
+ uint64_t pte_ar :3; /* bits 9..11 */
+ uint64_t pte_ppn :38; /* bits 12..49 */
+ uint64_t __rv2__ :2; /* bits 50..51 */
+ uint64_t pte_ed :1; /* bit 52 */
+ /* The following 11 bits are ignored by the hardware. */
+ uint64_t pte_w :1; /* bit 53 */
+ uint64_t pte_m :1; /* bit 54 */
+ uint64_t pte_prot:3; /* bits 55..57 */
+ uint64_t __ig__ :6; /* bits 58..63 */
+
+ uint64_t __rv3__ :2; /* bits 0..1 */
+ uint64_t pte_ps :6; /* bits 2..7 */
+ uint64_t pte_key :24; /* bits 8..31 */
+ uint64_t __rv4__ :32; /* bits 32..63 */
+
+ uint64_t pte_tag; /* includes ti */
+
+ uint64_t pte_chain; /* pa of collision chain */
};
/*
* Layout of cr.itir.
*/
struct ia64_itir {
- u_int64_t itir_rv1 :2; /* bits 0..1 */
- u_int64_t itir_ps :6; /* bits 2..7 */
- u_int64_t itir_key :24; /* bits 8..31 */
- u_int64_t itir_rv2 :32; /* bits 32..63 */
-};
-
-/*
- * Layout of cr.ifa.
- */
-struct ia64_ifa {
- u_int64_t ifa_ig :12; /* bits 0..11 */
- u_int64_t ifa_vpn :52; /* bits 12..63 */
+ uint64_t __rv1__ :2; /* bits 0..1 */
+ uint64_t itir_ps :6; /* bits 2..7 */
+ uint64_t itir_key:24; /* bits 8..31 */
+ uint64_t __rv2__ :32; /* bits 32..63 */
};
/*
* Layout of rr[x].
*/
struct ia64_rr {
- u_int64_t rr_ve :1; /* bits 0..0 */
- u_int64_t rr_rv1 :1; /* bits 1..1 */
- u_int64_t rr_ps :6; /* bits 2..7 */
- u_int64_t rr_rid :24; /* bits 8..31 */
- u_int64_t rr_rv2 :32; /* bits 32..63 */
+ uint64_t rr_ve :1; /* bit 0 */
+ uint64_t __rv1__ :1; /* bit 1 */
+ uint64_t rr_ps :6; /* bits 2..7 */
+ uint64_t rr_rid :24; /* bits 8..31 */
+ uint64_t __rv2__ :32; /* bits 32..63 */
};
#endif /* !LOCORE */