author    jake <jake@FreeBSD.org>  2002-05-21 00:29:02 +0000
committer jake <jake@FreeBSD.org>  2002-05-21 00:29:02 +0000
commit    e0ffa7e6d398b5c92e6de7cd4aad933f2106c1b8
tree      4555c1b3866d29bc0a9a0a0c12c14dd2bf02ae6e
parent    af380c638d312f54d9d66b8079ada90f5993e2e6
Redefine the tte accessor macros to take a pointer to a tte, instead of
the value of the tag or data field.  Add macros for getting the page
shift, size and mask for the physical page that a tte maps (which may
be one of several sizes).  Use the new cache functions for invalidating
single pages.
-rw-r--r--  sys/sparc64/include/tlb.h   |  5
-rw-r--r--  sys/sparc64/include/tte.h   | 51
-rw-r--r--  sys/sparc64/sparc64/pmap.c  | 87
-rw-r--r--  sys/sparc64/sparc64/pv.c    |  4
-rw-r--r--  sys/sparc64/sparc64/tsb.c   | 14
5 files changed, 77 insertions(+), 84 deletions(-)
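
The new TTE_GET_PAGE_* macros (see the tte.h diff below) derive a
mapping's page shift, size and mask from the 2-bit size field in the
TTE data word: each step of the size field adds TD_SIZE_SPREAD (3)
bits to the base page shift, giving the UltraSPARC page sizes 8K,
64K, 512K and 4M.  The following user-space sketch mirrors that logic;
PAGE_SHIFT = 13 matches sparc64's 8K base page, but TD_SIZE_SHIFT = 61
is an assumption copied in for illustration (it is defined elsewhere
in tte.h), so treat the constants as illustrative, not authoritative.

/*
 * Standalone sketch of the new page-size macros.  The macro bodies
 * match the tte.h diff below; PAGE_SHIFT and TD_SIZE_SHIFT are
 * assumptions supplied so the sketch compiles on its own.
 */
#include <stdio.h>

#define PAGE_SHIFT      13      /* sparc64: 8K base pages */
#define TD_SIZE_SHIFT   61      /* assumed position of the size field */
#define TD_SIZE_BITS    2
#define TD_SIZE_MASK    ((1UL << TD_SIZE_BITS) - 1)
#define TD_SIZE_SPREAD  3       /* shift bits between page sizes */

struct tte {
	unsigned long tte_vpn;
	unsigned long tte_data;
};

#define TTE_GET_SIZE(tp) \
	(((tp)->tte_data >> TD_SIZE_SHIFT) & TD_SIZE_MASK)
#define TTE_GET_PAGE_SHIFT(tp) \
	(PAGE_SHIFT + (TTE_GET_SIZE(tp) * TD_SIZE_SPREAD))
#define TTE_GET_PAGE_SIZE(tp) \
	(1UL << TTE_GET_PAGE_SHIFT(tp))
#define TTE_GET_PAGE_MASK(tp) \
	(TTE_GET_PAGE_SIZE(tp) - 1)

int
main(void)
{
	struct tte t;
	unsigned long sz;

	/* The four encodings: 0 = 8K, 1 = 64K, 2 = 512K, 3 = 4M. */
	for (sz = 0; sz < 4; sz++) {
		t.tte_vpn = 0;
		t.tte_data = sz << TD_SIZE_SHIFT;
		printf("size %lu: shift %lu size %#lx mask %#lx\n", sz,
		    (unsigned long)TTE_GET_PAGE_SHIFT(&t),
		    TTE_GET_PAGE_SIZE(&t), TTE_GET_PAGE_MASK(&t));
	}
	return (0);
}

With these macros in place, pmap_extract() can compute
TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)) for any of the four
mapping sizes, which is what lets the pmap.c hunk below drop the old
pmap_page_masks[] lookup table.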
diff --git a/sys/sparc64/include/tlb.h b/sys/sparc64/include/tlb.h
index 2a46933..d92af31 100644
--- a/sys/sparc64/include/tlb.h
+++ b/sys/sparc64/include/tlb.h
@@ -87,8 +87,7 @@ void tlb_context_demap(struct pmap *pm);
void tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va);
void tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end);
-#define tlb_tte_demap(tte, pm) \
- tlb_page_demap(TD_GET_TLB((tte).tte_data), pm, \
- TV_GET_VA((tte).tte_vpn));
+#define tlb_tte_demap(tp, pm) \
+ tlb_page_demap(TTE_GET_TLB(tp), pm, TTE_GET_VA(tp))
#endif /* !_MACHINE_TLB_H_ */
diff --git a/sys/sparc64/include/tte.h b/sys/sparc64/include/tte.h
index 449ec88..39e85af 100644
--- a/sys/sparc64/include/tte.h
+++ b/sys/sparc64/include/tte.h
@@ -40,17 +40,19 @@
#define TD_PA_SHIFT (13)
#define TD_SOFT_SHIFT (7)
-#define TD_SIZE_SIZE (2)
-#define TD_SOFT2_SIZE (9)
-#define TD_DIAG_SIZE (9)
-#define TD_PA_SIZE (28)
-#define TD_SOFT_SIZE (6)
+#define TD_SIZE_BITS (2)
+#define TD_SOFT2_BITS (9)
+#define TD_DIAG_BITS (9)
+#define TD_PA_BITS (28)
+#define TD_SOFT_BITS (6)
-#define TD_SIZE_MASK (((1UL << TD_SIZE_SIZE) - 1) << TD_SIZE_SHIFT)
-#define TD_SOFT2_MASK (((1UL << TD_SOFT2_SIZE) - 1) << TD_SOFT2_SHIFT)
-#define TD_DIAG_MASK (((1UL << TD_DIAG_SIZE) - 1) << TD_DIAG_SHIFT)
-#define TD_PA_MASK (((1UL << TD_PA_SIZE) - 1) << TD_PA_SHIFT)
-#define TD_SOFT_MASK (((1UL << TD_SOFT_SIZE) - 1) << TD_SOFT_SHIFT)
+#define TD_SIZE_MASK ((1UL << TD_SIZE_BITS) - 1)
+#define TD_SOFT2_MASK ((1UL << TD_SOFT2_BITS) - 1)
+#define TD_DIAG_MASK ((1UL << TD_DIAG_BITS) - 1)
+#define TD_PA_MASK ((1UL << TD_PA_BITS) - 1)
+#define TD_SOFT_MASK ((1UL << TD_SOFT_BITS) - 1)
+
+#define TD_SIZE_SPREAD (3)
#define TS_EXEC (1UL << 4)
#define TS_REF (1UL << 3)
@@ -65,7 +67,7 @@
#define TD_4M (3UL << TD_SIZE_SHIFT)
#define TD_NFO (1UL << 60)
#define TD_IE (1UL << 59)
-#define TD_PA(pa) ((pa) & TD_PA_MASK)
+#define TD_PA(pa) ((pa) & (TD_PA_MASK << TD_PA_SHIFT))
#define TD_EXEC (TS_EXEC << TD_SOFT_SHIFT)
#define TD_REF (TS_REF << TD_SOFT_SHIFT)
#define TD_PV (TS_PV << TD_SOFT_SHIFT)
@@ -81,10 +83,21 @@
#define TV_VPN(va) ((va) >> PAGE_SHIFT)
-#define TD_GET_SIZE(d) (((d) >> TD_SIZE_SHIFT) & 3)
-#define TD_GET_PA(d) ((d) & TD_PA_MASK)
-#define TD_GET_TLB(d) (((d) & TD_EXEC) ? (TLB_DTLB | TLB_ITLB) : TLB_DTLB)
-#define TV_GET_VA(vpn) ((vpn) << PAGE_SHIFT)
+#define TTE_GET_SIZE(tp) \
+ (((tp)->tte_data >> TD_SIZE_SHIFT) & TD_SIZE_MASK)
+#define TTE_GET_PAGE_SHIFT(tp) \
+ (PAGE_SHIFT + (TTE_GET_SIZE(tp) * TD_SIZE_SPREAD))
+#define TTE_GET_PAGE_SIZE(tp) \
+ (1 << TTE_GET_PAGE_SHIFT(tp))
+#define TTE_GET_PAGE_MASK(tp) \
+ (TTE_GET_PAGE_SIZE(tp) - 1)
+
+#define TTE_GET_PA(tp) \
+ ((tp)->tte_data & (TD_PA_MASK << TD_PA_SHIFT))
+#define TTE_GET_TLB(tp) \
+ (((tp)->tte_data & TD_EXEC) ? (TLB_DTLB | TLB_ITLB) : TLB_DTLB)
+#define TTE_GET_VA(tp) \
+ ((tp)->tte_vpn << PAGE_SHIFT)
struct tte {
u_long tte_vpn;
@@ -92,15 +105,15 @@ struct tte {
};
static __inline int
-tte_match_vpn(struct tte tte, vm_offset_t vpn)
+tte_match_vpn(struct tte *tp, vm_offset_t vpn)
{
- return ((tte.tte_data & TD_V) != 0 && tte.tte_vpn == vpn);
+ return ((tp->tte_data & TD_V) != 0 && tp->tte_vpn == vpn);
}
static __inline int
-tte_match(struct tte tte, vm_offset_t va)
+tte_match(struct tte *tp, vm_offset_t va)
{
- return (tte_match_vpn(tte, va >> PAGE_SHIFT));
+ return (tte_match_vpn(tp, va >> PAGE_SHIFT));
}
#endif /* !_MACHINE_TTE_H_ */
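
The call-site effect of the pointer interface shows up in the tsb.c
hunk further down: the lookup loop binds tp = &bucket[i] once and
hands the same pointer to tte_match() and back to the caller, instead
of copying 16-byte struct tte values around.  A minimal sketch of that
idiom follows, with a stubbed four-entry bucket standing in for the
real TSB; the TD_V bit position is an assumption here, and
bucket_lookup() is a hypothetical stand-in for tsb_tte_lookup().

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT      13
#define TD_V            (1UL << 63)     /* assumed valid-bit position */
#define TSB_BUCKET_SIZE 4

struct tte {
	unsigned long tte_vpn;
	unsigned long tte_data;
};

/* Pointer-based predicates, as redefined in the diff above. */
static inline int
tte_match_vpn(struct tte *tp, unsigned long vpn)
{
	return ((tp->tte_data & TD_V) != 0 && tp->tte_vpn == vpn);
}

static inline int
tte_match(struct tte *tp, unsigned long va)
{
	return (tte_match_vpn(tp, va >> PAGE_SHIFT));
}

/* Lookup idiom from the tsb.c hunk: bind the pointer, then test. */
static struct tte *
bucket_lookup(struct tte *bucket, unsigned long va)
{
	struct tte *tp;
	int i;

	for (i = 0; i < TSB_BUCKET_SIZE; i++) {
		tp = &bucket[i];
		if (tte_match(tp, va))
			return (tp);
	}
	return (NULL);
}

int
main(void)
{
	struct tte bucket[TSB_BUCKET_SIZE] = { { 0, 0 } };
	unsigned long va = 0x40002000UL;

	/* Plant one valid entry, then find it by virtual address. */
	bucket[2].tte_vpn = va >> PAGE_SHIFT;
	bucket[2].tte_data = TD_V;
	printf("match at index %td\n", bucket_lookup(bucket, va) - bucket);
	return (0);
}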
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 26e9d0e..fa92dbf 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -153,16 +153,6 @@ struct pmap kernel_pmap_store;
static boolean_t pmap_initialized = FALSE;
-/* Convert a tte data field into a page mask */
-static vm_offset_t pmap_page_masks[] = {
- PAGE_MASK_8K,
- PAGE_MASK_64K,
- PAGE_MASK_512K,
- PAGE_MASK_4M
-};
-
-#define PMAP_TD_GET_MASK(d) pmap_page_masks[TD_GET_SIZE((d))]
-
/*
* Allocate physical memory for use in pmap_bootstrap.
*/
@@ -320,8 +310,8 @@ pmap_bootstrap(vm_offset_t ekva)
* pmap_kextract() will work for them.
*/
for (i = 0; i < kernel_tlb_slots; i++) {
- va = TV_GET_VA(kernel_ttes[i].tte_vpn);
- pa = TD_GET_PA(kernel_ttes[i].tte_data);
+ va = TTE_GET_VA(&kernel_ttes[i]);
+ pa = TTE_GET_PA(&kernel_ttes[i]);
for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) {
tp = tsb_kvtotte(va + off);
tp->tte_vpn = TV_VPN(va + off);
@@ -369,10 +359,10 @@ pmap_bootstrap(vm_offset_t ekva)
CTR0(KTR_PMAP, "pmap_bootstrap: translations");
qsort(translations, sz, sizeof (*translations), om_cmp);
for (i = 0; i < sz; i++) {
- CTR4(KTR_PMAP,
- "translation: start=%#lx size=%#lx tte=%#lx pa=%#lx",
+ CTR3(KTR_PMAP,
+ "translation: start=%#lx size=%#lx tte=%#lx",
translations[i].om_start, translations[i].om_size,
- translations[i].om_tte, TD_GET_PA(translations[i].om_tte));
+ translations[i].om_tte);
if (translations[i].om_start < 0xf0000000) /* XXX!!! */
continue;
for (off = 0; off < translations[i].om_size;
@@ -578,17 +568,14 @@ vm_offset_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
struct tte *tp;
- u_long d;
if (pm == kernel_pmap)
return (pmap_kextract(va));
tp = tsb_tte_lookup(pm, va);
if (tp == NULL)
return (0);
- else {
- d = tp->tte_data;
- return (TD_GET_PA(d) | (va & PMAP_TD_GET_MASK(d)));
- }
+ else
+ return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
}
/*
@@ -599,13 +586,11 @@ vm_offset_t
pmap_kextract(vm_offset_t va)
{
struct tte *tp;
- u_long d;
tp = tsb_kvtotte(va);
- d = tp->tte_data;
- if ((d & TD_V) == 0)
+ if ((tp->tte_data & TD_V) == 0)
return (0);
- return (TD_GET_PA(d) | (va & PMAP_TD_GET_MASK(d)));
+ return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
}
int
@@ -634,8 +619,6 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
return (0);
}
CTR0(KTR_PMAP, "pmap_cache_enter: marking uncacheable");
- if ((m->flags & PG_UNMANAGED) != 0)
- panic("pmap_cache_enter: non-managed page");
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
if ((tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va)) != NULL) {
atomic_clear_long(&tp->tte_data, TD_CV);
@@ -644,7 +627,7 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
}
}
pa = VM_PAGE_TO_PHYS(m);
- dcache_inval_phys(pa, pa + PAGE_SIZE - 1);
+ dcache_page_inval(pa);
return (0);
}
@@ -1248,7 +1231,7 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
{
vm_page_t m;
- m = PHYS_TO_VM_PAGE(TD_GET_PA(tp->tte_data));
+ m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
if ((tp->tte_data & TD_PV) != 0) {
if ((tp->tte_data & TD_W) != 0 &&
pmap_track_modified(pm, va))
@@ -1298,25 +1281,19 @@ pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
vm_offset_t va)
{
vm_page_t m;
- u_long data;
- data = tp->tte_data;
- if ((data & TD_PV) != 0) {
- m = PHYS_TO_VM_PAGE(TD_GET_PA(data));
- if ((data & TD_REF) != 0) {
+ if ((tp->tte_data & TD_PV) != 0) {
+ m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
+ if ((tp->tte_data & TD_REF) != 0) {
vm_page_flag_set(m, PG_REFERENCED);
- data &= ~TD_REF;
+ tp->tte_data &= ~TD_REF;
}
- if ((data & TD_W) != 0 &&
+ if ((tp->tte_data & TD_W) != 0 &&
pmap_track_modified(pm, va)) {
vm_page_dirty(m);
}
}
-
- data &= ~(TD_W | TD_SW);
- CTR2(KTR_PMAP, "pmap_protect: new=%#lx old=%#lx",
- data, tp->tte_data);
- tp->tte_data = data;
+ tp->tte_data &= ~(TD_W | TD_SW);
return (0);
}
@@ -1381,9 +1358,9 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
otte = *tp;
- om = PHYS_TO_VM_PAGE(TD_GET_PA(otte.tte_data));
+ om = PHYS_TO_VM_PAGE(TTE_GET_PA(&otte));
- if (TD_GET_PA(otte.tte_data) == pa) {
+ if (TTE_GET_PA(&otte) == pa) {
CTR0(KTR_PMAP, "pmap_enter: update");
PMAP_STATS_INC(pmap_enter_nupdate);
@@ -1418,7 +1395,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (pmap_track_modified(pm, va))
vm_page_dirty(m);
}
- tlb_tte_demap(otte, pm);
+ tlb_tte_demap(&otte, pm);
}
} else {
CTR0(KTR_PMAP, "pmap_enter: replace");
@@ -1449,7 +1426,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (pmap_cache_enter(m, va) != 0)
tte.tte_data |= TD_CV;
}
- tlb_tte_demap(otte, pm);
+ tlb_tte_demap(&otte, pm);
}
} else {
CTR0(KTR_PMAP, "pmap_enter: new");
@@ -1489,7 +1466,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (prot & VM_PROT_EXECUTE) {
tte.tte_data |= TD_EXEC;
PMAP_STATS_INC(pmap_niflush);
- icache_inval_phys(pa, pa + PAGE_SIZE - 1);
+ icache_page_inval(pa);
}
if (tp != NULL)
@@ -1543,7 +1520,7 @@ pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va)
tte.tte_data = tp->tte_data &
~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
tte.tte_vpn = TV_VPN(va);
- m = PHYS_TO_VM_PAGE(TD_GET_PA(tp->tte_data));
+ m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
if ((tp->tte_data & TD_PV) != 0) {
KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
("pmap_enter: unmanaged pv page"));
@@ -1585,22 +1562,24 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
void
pmap_zero_page(vm_page_t m)
{
- vm_offset_t pa = VM_PAGE_TO_PHYS(m);
+ vm_offset_t pa;
+ pa = VM_PAGE_TO_PHYS(m);
CTR1(KTR_PMAP, "pmap_zero_page: pa=%#lx", pa);
- dcache_inval_phys(pa, pa + PAGE_SIZE - 1);
+ dcache_page_inval(pa);
aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
}
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
- vm_offset_t pa = VM_PAGE_TO_PHYS(m);
+ vm_offset_t pa;
+ pa = VM_PAGE_TO_PHYS(m);
CTR3(KTR_PMAP, "pmap_zero_page_area: pa=%#lx off=%#x size=%#x",
pa, off, size);
KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
- dcache_inval_phys(pa + off, pa + off + size - 1);
+ dcache_page_inval(pa);
aszero(ASI_PHYS_USE_EC, pa + off, size);
}
@@ -1610,11 +1589,13 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
- vm_offset_t src = VM_PAGE_TO_PHYS(msrc);
- vm_offset_t dst = VM_PAGE_TO_PHYS(mdst);
+ vm_offset_t dst;
+ vm_offset_t src;
+ dst = VM_PAGE_TO_PHYS(mdst);
+ src = VM_PAGE_TO_PHYS(msrc);
CTR2(KTR_PMAP, "pmap_copy_page: src=%#lx dst=%#lx", src, dst);
- dcache_inval_phys(dst, dst + PAGE_SIZE - 1);
+ dcache_page_inval(dst);
ascopy(ASI_PHYS_USE_EC, src, dst, PAGE_SIZE);
}
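
The cache changes in this file follow the same per-page theme: range
calls like dcache_inval_phys(pa, pa + PAGE_SIZE - 1) become
dcache_page_inval(pa), which invalidates exactly one physical page
(note that pmap_zero_page_area() now invalidates the whole page even
when only a sub-range is zeroed).  Below is a hedged sketch of the
reworked pmap_zero_page() shape; dcache_page_inval() is stubbed and
memset() on a static buffer stands in for the kernel's aszero()
physical-address store, so this is illustration only.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE (1UL << 13)           /* sparc64: 8K base page */

static unsigned char fake_page[PAGE_SIZE];  /* stand-in for physical memory */

/*
 * Stub for the new single-page entry point; the real
 * dcache_page_inval() in the sparc64 cache code flushes the D-cache
 * lines covering one physical page.
 */
static void
dcache_page_inval(unsigned long pa)
{
	printf("D-cache invalidate, page %#lx\n", pa);
}

/*
 * Mirrors the reworked pmap_zero_page(): invalidate the page's cache
 * lines first, then zero it through the physical-address alias.
 */
static void
zero_page(unsigned long pa)
{
	dcache_page_inval(pa);
	memset(fake_page, 0, PAGE_SIZE);
}

int
main(void)
{
	zero_page(0x1e000UL);           /* hypothetical page-aligned PA */
	return (0);
}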
diff --git a/sys/sparc64/sparc64/pv.c b/sys/sparc64/sparc64/pv.c
index 7420734..c65b5a7 100644
--- a/sys/sparc64/sparc64/pv.c
+++ b/sys/sparc64/sparc64/pv.c
@@ -167,7 +167,7 @@ pv_bit_clear(vm_page_t m, u_long bits)
vm_page_dirty(m);
}
atomic_clear_long(&tp->tte_data, bits);
- tlb_tte_demap(*tp, pv->pv_pmap);
+ tlb_tte_demap(tp, pv->pv_pmap);
}
}
}
@@ -269,7 +269,7 @@ pv_remove_all(vm_page_t m)
vm_page_dirty(m);
}
atomic_clear_long(&tp->tte_data, TD_V);
- tlb_tte_demap(*tp, pv->pv_pmap);
+ tlb_tte_demap(tp, pv->pv_pmap);
tp->tte_vpn = 0;
tp->tte_data = 0;
pv->pv_pmap->pm_stats.resident_count--;
diff --git a/sys/sparc64/sparc64/tsb.c b/sys/sparc64/sparc64/tsb.c
index 1132df3..ee0232f 100644
--- a/sys/sparc64/sparc64/tsb.c
+++ b/sys/sparc64/sparc64/tsb.c
@@ -110,7 +110,7 @@ tsb_tte_lookup(pmap_t pm, vm_offset_t va)
CTR3(KTR_TSB,
"tsb_tte_lookup: kernel va=%#lx tp=%#lx data=%#lx",
va, tp, tp->tte_data);
- if (tte_match(*tp, va)) {
+ if (tte_match(tp, va)) {
CTR1(KTR_TSB, "tsb_tte_lookup: match va=%#lx", va);
return (tp);
}
@@ -121,8 +121,8 @@ tsb_tte_lookup(pmap_t pm, vm_offset_t va)
CTR3(KTR_TSB, "tsb_tte_lookup: ctx=%#lx va=%#lx bucket=%p",
pm->pm_context[PCPU_GET(cpuid)], va, bucket);
for (i = 0; i < TSB_BUCKET_SIZE; i++) {
- if (tte_match(bucket[i], va)) {
- tp = &bucket[i];
+ tp = &bucket[i];
+ if (tte_match(tp, va)) {
CTR2(KTR_TSB,
"tsb_tte_lookup: match va=%#lx tp=%p",
va, tp);
@@ -179,9 +179,9 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, struct tte tte)
tp = rtp;
if ((tp->tte_data & TD_V) != 0) {
TSB_STATS_INC(tsb_nrepl);
- ova = TV_GET_VA(tp->tte_vpn);
+ ova = TTE_GET_VA(tp);
if ((tp->tte_data & TD_PV) != 0) {
- om = PHYS_TO_VM_PAGE(TD_GET_PA(tp->tte_data));
+ om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
if ((tp->tte_data & TD_W) != 0 &&
pmap_track_modified(pm, ova))
vm_page_dirty(om);
@@ -190,7 +190,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, struct tte tte)
pmap_cache_remove(om, ova);
pv_remove(pm, om, ova);
}
- tlb_tte_demap(*tp, pm);
+ tlb_tte_demap(tp, pm);
}
*tp = tte;
@@ -219,7 +219,7 @@ tsb_foreach(pmap_t pm1, pmap_t pm2, vm_offset_t start, vm_offset_t end,
for (i = 0; i < TSB_SIZE; i++) {
tp = &pm1->pm_tsb[i];
if ((tp->tte_data & TD_V) != 0) {
- va = TV_GET_VA(tp->tte_vpn);
+ va = TTE_GET_VA(tp);
if (va >= start && va < end) {
if (!callback(pm1, pm2, tp, va))
break;