summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjake <jake@FreeBSD.org>2002-12-21 22:43:19 +0000
committerjake <jake@FreeBSD.org>2002-12-21 22:43:19 +0000
commite78787e4ef8ed8f3be0d09456ca984a89170e783 (patch)
tree753f9c747cbb5133521415c6ab48f5e198d43220
parent94d1ceabe0b85163330b21789aed37043df7a83b (diff)
downloadFreeBSD-src-e78787e4ef8ed8f3be0d09456ca984a89170e783.zip
FreeBSD-src-e78787e4ef8ed8f3be0d09456ca984a89170e783.tar.gz
- Add a pmap pointer to struct md_page, and use this to find the pmap that
a mapping belongs to by setting it in the vm_page_t structure that backs
the tsb page that the tte for a mapping is in. This allows the pmap that
a mapping belongs to to be found without keeping a pointer to it in the
tte itself.
- Remove the pmap pointer from struct tte and use the space to make the
tte pv lists doubly linked (TAILQs), like on other architectures. This
makes entering or removing a mapping O(1) instead of O(n) where n is the
number of pmaps a page is mapped by (including kernel_pmap).
- Use atomic ops for setting and clearing bits in the ttes, now that they
return the old value and can be easily used for this purpose.
- Use __builtin_memset for zeroing ttes instead of bzero, so that gcc will
inline it (4 inline stores using %g0 instead of a function call).
- Initially set the virtual colour for all the vm_page_ts to be equal to
their physical colour. This will be more useful once uma_small_alloc is
implemented, but basically pages with virtual colour equal to physical
colour are easier to handle at the pmap level because they can be safely
accessed through cachable direct virtual to physical mappings with that
colour, without fear of causing illegal dcache aliases.
In total these changes give a minor performance improvement, about 1%
reduction in system time during buildworld.
-rw-r--r--sys/sparc64/include/pmap.h11
-rw-r--r--sys/sparc64/include/tte.h9
-rw-r--r--sys/sparc64/sparc64/pmap.c116
-rw-r--r--sys/sparc64/sparc64/tsb.c3
4 files changed, 70 insertions, 69 deletions
diff --git a/sys/sparc64/include/pmap.h b/sys/sparc64/include/pmap.h
index 427074a..d0a11fa 100644
--- a/sys/sparc64/include/pmap.h
+++ b/sys/sparc64/include/pmap.h
@@ -55,16 +55,17 @@
#define PMAP_CONTEXT_MAX 8192
-#define pmap_page_is_mapped(m) (!STAILQ_EMPTY(&(m)->md.tte_list))
+#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.tte_list))
#define pmap_resident_count(pm) (pm->pm_stats.resident_count)
typedef struct pmap *pmap_t;
struct md_page {
- STAILQ_HEAD(, tte) tte_list;
- int colors[DCACHE_COLORS];
- int color;
- int flags;
+ TAILQ_HEAD(, tte) tte_list;
+ struct pmap *pmap;
+ uint32_t colors[DCACHE_COLORS];
+ int32_t color;
+ uint32_t flags;
};
struct pmap {
diff --git a/sys/sparc64/include/tte.h b/sys/sparc64/include/tte.h
index e86c722..c2b38b1 100644
--- a/sys/sparc64/include/tte.h
+++ b/sys/sparc64/include/tte.h
@@ -104,17 +104,18 @@
#define TTE_GET_VA(tp) \
(TTE_GET_VPN(tp) << TTE_GET_PAGE_SHIFT(tp))
#define TTE_GET_PMAP(tp) \
- ((tp)->tte_pmap)
+ (((tp)->tte_data & TD_P) != 0 ? \
+ (kernel_pmap) : \
+ (PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)(tp)))->md.pmap))
#define TTE_ZERO(tp) \
- bzero(tp, sizeof(*tp))
+ __builtin_memset(tp, 0, sizeof(*tp))
struct pmap;
struct tte {
u_long tte_vpn;
u_long tte_data;
- STAILQ_ENTRY(tte) tte_link;
- struct pmap *tte_pmap;
+ TAILQ_ENTRY(tte) tte_link;
};
static __inline int
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 3e29af7..0296376 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -594,9 +594,10 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
vm_page_t m;
m = &vm_page_array[i];
- STAILQ_INIT(&m->md.tte_list);
+ TAILQ_INIT(&m->md.tte_list);
+ m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
m->md.flags = 0;
- m->md.color = 0;
+ m->md.pmap = NULL;
}
for (i = 0; i < translations_size; i++) {
@@ -706,8 +707,8 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
* Mark all mappings as uncacheable, flush any lines with the other
* color out of the dcache, and set the color to none (-1).
*/
- STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
- tp->tte_data &= ~TD_CV;
+ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+ atomic_clear_long(&tp->tte_data, TD_CV);
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
dcache_page_inval(VM_PAGE_TO_PHYS(m));
@@ -765,8 +766,8 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
* color. There should be no lines in the data cache for this page,
* so flushing should not be needed.
*/
- STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
- tp->tte_data |= TD_CV;
+ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+ atomic_set_long(&tp->tte_data, TD_CV);
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
m->md.color = DCACHE_OTHER_COLOR(color);
@@ -791,7 +792,7 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
if ((tp->tte_data & TD_V) != 0) {
om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
ova = TTE_GET_VA(tp);
- STAILQ_REMOVE(&om->md.tte_list, tp, tte, tte_link);
+ TAILQ_REMOVE(&om->md.tte_list, tp, tte_link);
pmap_cache_remove(om, ova);
if (va != ova)
tlb_page_demap(kernel_pmap, ova);
@@ -801,8 +802,7 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
data |= TD_CV;
tp->tte_vpn = TV_VPN(va, TS_8K);
tp->tte_data = data;
- STAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
- tp->tte_pmap = kernel_pmap;
+ TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
}
/*
@@ -856,7 +856,7 @@ pmap_kremove(vm_offset_t va)
CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
tp->tte_data);
m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
- STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link);
+ TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
pmap_cache_remove(m, va);
TTE_ZERO(tp);
}
@@ -1204,6 +1204,7 @@ pmap_pinit(pmap_t pm)
vm_page_flag_clear(m, PG_BUSY);
m->valid = VM_PAGE_BITS_ALL;
+ m->md.pmap = pm;
ma[i] = m;
}
@@ -1247,6 +1248,7 @@ pmap_release(pmap_t pm)
vm_page_busy(m);
KASSERT(m->hold_count == 0,
("pmap_release: freeing held tsb page"));
+ m->md.pmap = NULL;
m->wire_count--;
cnt.v_wire_count--;
vm_page_free_zero(m);
@@ -1270,18 +1272,19 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
vm_offset_t va)
{
vm_page_t m;
+ u_long data;
- m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
- STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link);
- if ((tp->tte_data & TD_WIRED) != 0)
+ data = atomic_readandclear_long(&tp->tte_data);
+ m = PHYS_TO_VM_PAGE(TD_PA(data));
+ TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
+ if ((data & TD_WIRED) != 0)
pm->pm_stats.wired_count--;
- if ((tp->tte_data & TD_PV) != 0) {
- if ((tp->tte_data & TD_W) != 0 &&
- pmap_track_modified(pm, va))
+ if ((data & TD_PV) != 0) {
+ if ((data & TD_W) != 0 && pmap_track_modified(pm, va))
vm_page_dirty(m);
- if ((tp->tte_data & TD_REF) != 0)
+ if ((data & TD_REF) != 0)
vm_page_flag_set(m, PG_REFERENCED);
- if (STAILQ_EMPTY(&m->md.tte_list))
+ if (TAILQ_EMPTY(&m->md.tte_list))
vm_page_flag_clear(m, PG_WRITEABLE);
pm->pm_stats.resident_count--;
}
@@ -1330,8 +1333,8 @@ pmap_remove_all(vm_page_t m)
KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
("pv_remove_all: illegal for unmanaged page %#lx",
VM_PAGE_TO_PHYS(m)));
- for (tp = STAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
- tpn = STAILQ_NEXT(tp, tte_link);
+ for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
+ tpn = TAILQ_NEXT(tp, tte_link);
if ((tp->tte_data & TD_PV) == 0)
continue;
pm = TTE_GET_PMAP(tp);
@@ -1345,7 +1348,7 @@ pmap_remove_all(vm_page_t m)
vm_page_dirty(m);
tp->tte_data &= ~TD_V;
tlb_page_demap(pm, va);
- STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link);
+ TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
pm->pm_stats.resident_count--;
pmap_cache_remove(m, va);
TTE_ZERO(tp);
@@ -1357,20 +1360,17 @@ int
pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
vm_offset_t va)
{
+ u_long data;
vm_page_t m;
- if ((tp->tte_data & TD_PV) != 0) {
- m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
- if ((tp->tte_data & TD_REF) != 0) {
+ data = atomic_clear_long(&tp->tte_data, TD_REF | TD_SW | TD_W);
+ if ((data & TD_PV) != 0) {
+ m = PHYS_TO_VM_PAGE(TD_PA(data));
+ if ((data & TD_REF) != 0)
vm_page_flag_set(m, PG_REFERENCED);
- tp->tte_data &= ~TD_REF;
- }
- if ((tp->tte_data & TD_W) != 0 &&
- pmap_track_modified(pm, va)) {
+ if ((data & TD_W) != 0 && pmap_track_modified(pm, va))
vm_page_dirty(m);
- }
}
- tp->tte_data &= ~(TD_W | TD_SW);
return (0);
}
@@ -1548,16 +1548,17 @@ void
pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
{
struct tte *tp;
+ u_long data;
if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
if (wired) {
- if ((tp->tte_data & TD_WIRED) == 0)
+ data = atomic_set_long(&tp->tte_data, TD_WIRED);
+ if ((data & TD_WIRED) == 0)
pm->pm_stats.wired_count++;
- tp->tte_data |= TD_WIRED;
} else {
- if ((tp->tte_data & TD_WIRED) != 0)
+ data = atomic_clear_long(&tp->tte_data, TD_WIRED);
+ if ((data & TD_WIRED) != 0)
pm->pm_stats.wired_count--;
- tp->tte_data &= ~TD_WIRED;
}
}
}
@@ -1668,7 +1669,7 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return (FALSE);
loops = 0;
- STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
if (TTE_GET_PMAP(tp) == pm)
@@ -1730,26 +1731,25 @@ pmap_ts_referenced(vm_page_t m)
struct tte *tpf;
struct tte *tpn;
struct tte *tp;
+ u_long data;
int count;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return (0);
count = 0;
- if ((tp = STAILQ_FIRST(&m->md.tte_list)) != NULL) {
+ if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
tpf = tp;
do {
- tpn = STAILQ_NEXT(tp, tte_link);
- STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link);
- STAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
+ tpn = TAILQ_NEXT(tp, tte_link);
+ TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
+ TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
if ((tp->tte_data & TD_PV) == 0 ||
!pmap_track_modified(TTE_GET_PMAP(tp),
TTE_GET_VA(tp)))
continue;
- if ((tp->tte_data & TD_REF) != 0) {
- tp->tte_data &= ~TD_REF;
- if (++count > 4)
- break;
- }
+ data = atomic_clear_long(&tp->tte_data, TD_REF);
+ if ((data & TD_REF) != 0 && ++count > 4)
+ break;
} while ((tp = tpn) != NULL && tp != tpf);
}
return (count);
@@ -1762,7 +1762,7 @@ pmap_is_modified(vm_page_t m)
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return FALSE;
- STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0 ||
!pmap_track_modified(TTE_GET_PMAP(tp), TTE_GET_VA(tp)))
continue;
@@ -1776,16 +1776,16 @@ void
pmap_clear_modify(vm_page_t m)
{
struct tte *tp;
+ u_long data;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return;
- STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
- if ((tp->tte_data & TD_W) != 0) {
- tp->tte_data &= ~TD_W;
+ data = atomic_clear_long(&tp->tte_data, TD_W);
+ if ((data & TD_W) != 0)
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
- }
}
}
@@ -1793,16 +1793,16 @@ void
pmap_clear_reference(vm_page_t m)
{
struct tte *tp;
+ u_long data;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return;
- STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
- if ((tp->tte_data & TD_REF) != 0) {
- tp->tte_data &= ~TD_REF;
+ data = atomic_clear_long(&tp->tte_data, TD_REF);
+ if ((data & TD_REF) != 0)
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
- }
}
}
@@ -1810,19 +1810,19 @@ void
pmap_clear_write(vm_page_t m)
{
struct tte *tp;
+ u_long data;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
- STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
- if ((tp->tte_data & (TD_SW | TD_W)) != 0) {
- if ((tp->tte_data & TD_W) != 0 &&
- pmap_track_modified(TTE_GET_PMAP(tp),
+ data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
+ if ((data & TD_W) != 0) {
+ if (pmap_track_modified(TTE_GET_PMAP(tp),
TTE_GET_VA(tp)))
vm_page_dirty(m);
- tp->tte_data &= ~(TD_SW | TD_W);
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
}
diff --git a/sys/sparc64/sparc64/tsb.c b/sys/sparc64/sparc64/tsb.c
index a2293cd..2be5d97 100644
--- a/sys/sparc64/sparc64/tsb.c
+++ b/sys/sparc64/sparc64/tsb.c
@@ -183,8 +183,7 @@ enter:
tp->tte_vpn = TV_VPN(va, sz);
tp->tte_data = data;
- STAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
- tp->tte_pmap = pm;
+ TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
return (tp);
}
OpenPOWER on IntegriCloud