author     jake <jake@FreeBSD.org>    2003-01-05 05:30:40 +0000
committer  jake <jake@FreeBSD.org>    2003-01-05 05:30:40 +0000
commit     07e8d84aeafc4c536fc3617199cb67ff304769d0 (patch)
tree       8143586ff4eede072e84820580516ab704855c8d
parent     2c839e264808090112dce1afcf161ca60c37c325 (diff)
- Reorganize PMAP_STATS to scale a little better.
- Add some more stats for things that are now considered interesting.
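As a rough illustration of the reorganization described above (a minimal sketch only; the counter and function names below are hypothetical and not part of this commit): instead of each file carrying its own #ifdef PMAP_STATS block of static longs plus SYSCTL_LONG declarations, a consumer now declares a counter with PMAP_STATS_VAR() and bumps it with PMAP_STATS_INC(), both provided by the machine-dependent pmap.h. When the kernel is built without "options PMAP_STATS", both macros expand to nothing.

/*
 * Hypothetical consumer, mirroring what cache.c and tlb.c do in this commit.
 */
#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker_set.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>	/* pulls in <machine/pmap.h> and the stats macros */

/* One line per counter: a static long exported read-only under debug.pmap_stats. */
PMAP_STATS_VAR(example_nfrob);

static void
example_frob(void)
{

	/* atomic_add_long(&example_nfrob, 1) with PMAP_STATS; a no-op without. */
	PMAP_STATS_INC(example_nfrob);
}

On a kernel built with "options PMAP_STATS", the counter then appears as the sysctl debug.pmap_stats.example_nfrob, since pmap.c creates the debug.pmap_stats node.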
-rw-r--r--  sys/sparc64/include/pmap.h         19
-rw-r--r--  sys/sparc64/sparc64/cache.c        32
-rw-r--r--  sys/sparc64/sparc64/pmap.c        173
-rw-r--r--  sys/sparc64/sparc64/tlb.c          11
-rw-r--r--  sys/sparc64/sparc64/tsb.c          44
-rw-r--r--  sys/sparc64/sparc64/vm_machdep.c   12
6 files changed, 167 insertions, 124 deletions
diff --git a/sys/sparc64/include/pmap.h b/sys/sparc64/include/pmap.h
index d0a11fa..a14673e 100644
--- a/sys/sparc64/include/pmap.h
+++ b/sys/sparc64/include/pmap.h
@@ -115,4 +115,23 @@ pmap_track_modified(pmap_t pm, vm_offset_t va)
return (1);
}
+#ifdef PMAP_STATS
+
+SYSCTL_DECL(_debug_pmap_stats);
+
+#define PMAP_STATS_VAR(name) \
+ static long name; \
+ SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, name, CTLFLAG_RD, \
+ &name, 0, "")
+
+#define PMAP_STATS_INC(var) \
+ atomic_add_long(&var, 1)
+
+#else
+
+#define PMAP_STATS_VAR(name)
+#define PMAP_STATS_INC(var)
+
+#endif
+
#endif /* !_MACHINE_PMAP_H_ */
diff --git a/sys/sparc64/sparc64/cache.c b/sys/sparc64/sparc64/cache.c
index 227577e..4ef6bb3 100644
--- a/sys/sparc64/sparc64/cache.c
+++ b/sys/sparc64/sparc64/cache.c
@@ -177,32 +177,10 @@
struct cacheinfo cache;
-#ifdef PMAP_STATS
-static long dcache_npage_inval;
-static long dcache_npage_inval_line;
-static long dcache_npage_inval_match;
-static long icache_npage_inval;
-static long icache_npage_inval_line;
-static long icache_npage_inval_match;
-
-SYSCTL_DECL(_debug_pmap_stats);
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, dcache_npage_inval, CTLFLAG_RD,
- &dcache_npage_inval, 0, "Number of calls to dcache_page_inval");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, dcache_npage_inval_line, CTLFLAG_RD,
- &dcache_npage_inval_line, 0, "Number of lines checked");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, dcache_npage_inval_match, CTLFLAG_RD,
- &dcache_npage_inval_match, 0, "Number of matching lines");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, icache_npage_inval, CTLFLAG_RD,
- &icache_npage_inval, 0, "Number of calls to icache_page_inval");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, icache_npage_inval_line, CTLFLAG_RD,
- &icache_npage_inval_line, 0, "Number of lines checked");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, icache_npage_inval_match, CTLFLAG_RD,
- &icache_npage_inval_match, 0, "Number of matching lines");
-
-#define PMAP_STATS_INC(var) atomic_add_long(&var, 1)
-#else
-#define PMAP_STATS_INC(var)
-#endif
+PMAP_STATS_VAR(dcache_npage_inval);
+PMAP_STATS_VAR(dcache_npage_inval_match);
+PMAP_STATS_VAR(icache_npage_inval);
+PMAP_STATS_VAR(icache_npage_inval_match);
/* Read to %g0, needed for E$ access. */
#define CDIAG_RDG0(asi, addr) \
@@ -266,7 +244,6 @@ dcache_page_inval(vm_offset_t pa)
target = pa >> (PAGE_SHIFT - DC_TAG_SHIFT);
cookie = ipi_dcache_page_inval(pa);
for (addr = 0; addr < cache.dc_size; addr += cache.dc_linesize) {
- PMAP_STATS_INC(dcache_npage_inval_line);
tag = ldxa(addr, ASI_DCACHE_TAG);
if (((tag >> DC_VALID_SHIFT) & DC_VALID_MASK) == 0)
continue;
@@ -296,7 +273,6 @@ icache_page_inval(vm_offset_t pa)
target = pa >> (PAGE_SHIFT - IC_TAG_SHIFT);
cookie = ipi_icache_page_inval(pa);
for (addr = 0; addr < cache.ic_size; addr += cache.ic_linesize) {
- PMAP_STATS_INC(icache_npage_inval_line);
__asm __volatile("ldda [%1] %2, %%g0" /*, %g1 */
: "=r" (tag) : "r" (addr), "n" (ASI_ICACHE_TAG));
if (((tag >> IC_VALID_SHIFT) & IC_VALID_MASK) == 0)
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 14c49e8..ec19d89 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -89,7 +89,6 @@
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
-#include <vm/uma.h>
#include <machine/cache.h>
#include <machine/frame.h>
@@ -176,50 +175,53 @@ extern int tl1_dmmu_prot_patch_2[];
*/
#define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE)
-#ifdef PMAP_STATS
-static long pmap_enter_nupdate;
-static long pmap_enter_nreplace;
-static long pmap_enter_nnew;
-static long pmap_ncache_enter;
-static long pmap_ncache_enter_c;
-static long pmap_ncache_enter_cc;
-static long pmap_ncache_enter_nc;
-static long pmap_ncache_remove;
-static long pmap_ncache_remove_c;
-static long pmap_ncache_remove_cc;
-static long pmap_ncache_remove_nc;
-static long pmap_niflush;
-
-SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "Statistics");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_enter_nupdate, CTLFLAG_RD,
- &pmap_enter_nupdate, 0, "Number of pmap_enter() updates");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_enter_nreplace, CTLFLAG_RD,
- &pmap_enter_nreplace, 0, "Number of pmap_enter() replacements");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_enter_nnew, CTLFLAG_RD,
- &pmap_enter_nnew, 0, "Number of pmap_enter() additions");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter, CTLFLAG_RD,
- &pmap_ncache_enter, 0, "Number of pmap_cache_enter() calls");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter_c, CTLFLAG_RD,
- &pmap_ncache_enter_c, 0, "Number of pmap_cache_enter() cacheable");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter_cc, CTLFLAG_RD,
- &pmap_ncache_enter_cc, 0, "Number of pmap_cache_enter() change color");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter_nc, CTLFLAG_RD,
- &pmap_ncache_enter_nc, 0, "Number of pmap_cache_enter() noncacheable");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove, CTLFLAG_RD,
- &pmap_ncache_remove, 0, "Number of pmap_cache_remove() calls");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove_c, CTLFLAG_RD,
- &pmap_ncache_remove_c, 0, "Number of pmap_cache_remove() cacheable");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove_cc, CTLFLAG_RD,
- &pmap_ncache_remove_cc, 0, "Number of pmap_cache_remove() change color");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove_nc, CTLFLAG_RD,
- &pmap_ncache_remove_nc, 0, "Number of pmap_cache_remove() noncacheable");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_niflush, CTLFLAG_RD,
- &pmap_niflush, 0, "Number of pmap I$ flushes");
-
-#define PMAP_STATS_INC(var) atomic_add_long(&var, 1)
-#else
-#define PMAP_STATS_INC(var)
-#endif
+SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "");
+
+PMAP_STATS_VAR(pmap_nenter);
+PMAP_STATS_VAR(pmap_nenter_update);
+PMAP_STATS_VAR(pmap_nenter_replace);
+PMAP_STATS_VAR(pmap_nenter_new);
+PMAP_STATS_VAR(pmap_nkenter);
+PMAP_STATS_VAR(pmap_nkremove);
+PMAP_STATS_VAR(pmap_nqenter);
+PMAP_STATS_VAR(pmap_nqremove);
+PMAP_STATS_VAR(pmap_ncache_enter);
+PMAP_STATS_VAR(pmap_ncache_enter_c);
+PMAP_STATS_VAR(pmap_ncache_enter_oc);
+PMAP_STATS_VAR(pmap_ncache_enter_cc);
+PMAP_STATS_VAR(pmap_ncache_enter_coc);
+PMAP_STATS_VAR(pmap_ncache_enter_nc);
+PMAP_STATS_VAR(pmap_ncache_enter_cnc);
+PMAP_STATS_VAR(pmap_ncache_remove);
+PMAP_STATS_VAR(pmap_ncache_remove_c);
+PMAP_STATS_VAR(pmap_ncache_remove_oc);
+PMAP_STATS_VAR(pmap_ncache_remove_cc);
+PMAP_STATS_VAR(pmap_ncache_remove_coc);
+PMAP_STATS_VAR(pmap_ncache_remove_nc);
+PMAP_STATS_VAR(pmap_niflush);
+PMAP_STATS_VAR(pmap_nzero_page);
+PMAP_STATS_VAR(pmap_nzero_page_c);
+PMAP_STATS_VAR(pmap_nzero_page_oc);
+PMAP_STATS_VAR(pmap_nzero_page_nc);
+PMAP_STATS_VAR(pmap_nzero_page_area);
+PMAP_STATS_VAR(pmap_nzero_page_area_c);
+PMAP_STATS_VAR(pmap_nzero_page_area_oc);
+PMAP_STATS_VAR(pmap_nzero_page_area_nc);
+PMAP_STATS_VAR(pmap_nzero_page_idle);
+PMAP_STATS_VAR(pmap_nzero_page_idle_c);
+PMAP_STATS_VAR(pmap_nzero_page_idle_oc);
+PMAP_STATS_VAR(pmap_nzero_page_idle_nc);
+PMAP_STATS_VAR(pmap_ncopy_page);
+PMAP_STATS_VAR(pmap_ncopy_page_c);
+PMAP_STATS_VAR(pmap_ncopy_page_oc);
+PMAP_STATS_VAR(pmap_ncopy_page_nc);
+PMAP_STATS_VAR(pmap_ncopy_page_dc);
+PMAP_STATS_VAR(pmap_ncopy_page_doc);
+PMAP_STATS_VAR(pmap_ncopy_page_sc);
+PMAP_STATS_VAR(pmap_ncopy_page_soc);
+
+PMAP_STATS_VAR(pmap_nnew_thread);
+PMAP_STATS_VAR(pmap_nnew_thread_oc);
/*
* Quick sort callout for comparing memory regions.
@@ -676,7 +678,10 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
if (m->md.color == color) {
KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0,
("pmap_cache_enter: cacheable, mappings of other color"));
- PMAP_STATS_INC(pmap_ncache_enter_c);
+ if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
+ PMAP_STATS_INC(pmap_ncache_enter_c);
+ else
+ PMAP_STATS_INC(pmap_ncache_enter_oc);
return (1);
}
@@ -691,17 +696,22 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
("pmap_cache_enter: changing color, not new mapping"));
dcache_page_inval(VM_PAGE_TO_PHYS(m));
m->md.color = color;
- PMAP_STATS_INC(pmap_ncache_enter_cc);
+ if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
+ PMAP_STATS_INC(pmap_ncache_enter_cc);
+ else
+ PMAP_STATS_INC(pmap_ncache_enter_coc);
return (1);
}
- PMAP_STATS_INC(pmap_ncache_enter_nc);
-
/*
* If the mapping is already non-cacheable, just return.
*/
- if (m->md.color == -1)
+ if (m->md.color == -1) {
+ PMAP_STATS_INC(pmap_ncache_enter_nc);
return (0);
+ }
+
+ PMAP_STATS_INC(pmap_ncache_enter_cnc);
/*
* Mark all mappings as uncacheable, flush any lines with the other
@@ -741,7 +751,10 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
* if there are no longer any mappings.
*/
if (m->md.color != -1) {
- PMAP_STATS_INC(pmap_ncache_remove_c);
+ if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
+ PMAP_STATS_INC(pmap_ncache_remove_c);
+ else
+ PMAP_STATS_INC(pmap_ncache_remove_oc);
return;
}
@@ -758,8 +771,6 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
return;
}
- PMAP_STATS_INC(pmap_ncache_remove_cc);
-
/*
* The number of mappings for this color is now zero. Recache the
* other colored mappings, and change the page color to the other
@@ -771,6 +782,11 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
m->md.color = DCACHE_OTHER_COLOR(color);
+
+ if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
+ PMAP_STATS_INC(pmap_ncache_remove_cc);
+ else
+ PMAP_STATS_INC(pmap_ncache_remove_coc);
}
/*
@@ -785,6 +801,7 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
vm_page_t m;
u_long data;
+ PMAP_STATS_INC(pmap_nkenter);
tp = tsb_kvtotte(va);
m = PHYS_TO_VM_PAGE(pa);
CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
@@ -833,6 +850,7 @@ pmap_kremove(vm_offset_t va)
struct tte *tp;
vm_page_t m;
+ PMAP_STATS_INC(pmap_nkremove);
tp = tsb_kvtotte(va);
CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
tp->tte_data);
@@ -881,6 +899,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
vm_offset_t va;
+ PMAP_STATS_INC(pmap_nqenter);
va = sva;
while (count-- > 0) {
pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
@@ -899,6 +918,7 @@ pmap_qremove(vm_offset_t sva, int count)
{
vm_offset_t va;
+ PMAP_STATS_INC(pmap_nqremove);
va = sva;
while (count-- > 0) {
pmap_kremove(va);
@@ -925,6 +945,7 @@ pmap_new_thread(struct thread *td, int pages)
vm_page_t m;
u_int i;
+ PMAP_STATS_INC(pmap_nnew_thread);
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
@@ -963,6 +984,9 @@ pmap_new_thread(struct thread *td, int pages)
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
+ if (DCACHE_COLOR(ks + (i * PAGE_SIZE)) !=
+ DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
+ PMAP_STATS_INC(pmap_nnew_thread_oc);
vm_page_lock_queues();
vm_page_wakeup(m);
@@ -1365,6 +1389,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_offset_t pa;
u_long data;
+ PMAP_STATS_INC(pmap_nenter);
pa = VM_PAGE_TO_PHYS(m);
CTR6(KTR_PMAP,
"pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
@@ -1376,7 +1401,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
CTR0(KTR_PMAP, "pmap_enter: update");
- PMAP_STATS_INC(pmap_enter_nupdate);
+ PMAP_STATS_INC(pmap_nenter_update);
/*
* Wiring change, just update stats.
@@ -1434,14 +1459,14 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if (tp != NULL) {
CTR0(KTR_PMAP, "pmap_enter: replace");
- PMAP_STATS_INC(pmap_enter_nreplace);
+ PMAP_STATS_INC(pmap_nenter_replace);
vm_page_lock_queues();
pmap_remove_tte(pm, NULL, tp, va);
vm_page_unlock_queues();
tlb_page_demap(pm, va);
} else {
CTR0(KTR_PMAP, "pmap_enter: new");
- PMAP_STATS_INC(pmap_enter_nnew);
+ PMAP_STATS_INC(pmap_nenter_new);
}
/*
@@ -1553,13 +1578,17 @@ pmap_zero_page(vm_page_t m)
vm_offset_t va;
struct tte *tp;
+ PMAP_STATS_INC(pmap_nzero_page);
pa = VM_PAGE_TO_PHYS(m);
- if (m->md.color == -1)
+ if (m->md.color == -1) {
+ PMAP_STATS_INC(pmap_nzero_page_nc);
aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
- else if (m->md.color == DCACHE_COLOR(pa)) {
+ } else if (m->md.color == DCACHE_COLOR(pa)) {
+ PMAP_STATS_INC(pmap_nzero_page_c);
va = TLB_PHYS_TO_DIRECT(pa);
bzero((void *)va, PAGE_SIZE);
} else {
+ PMAP_STATS_INC(pmap_nzero_page_oc);
va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
tp = tsb_kvtotte(va);
tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
@@ -1577,13 +1606,17 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
struct tte *tp;
KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
+ PMAP_STATS_INC(pmap_nzero_page_area);
pa = VM_PAGE_TO_PHYS(m);
- if (m->md.color == -1)
+ if (m->md.color == -1) {
+ PMAP_STATS_INC(pmap_nzero_page_area_nc);
aszero(ASI_PHYS_USE_EC, pa + off, size);
- else if (m->md.color == DCACHE_COLOR(pa)) {
+ } else if (m->md.color == DCACHE_COLOR(pa)) {
+ PMAP_STATS_INC(pmap_nzero_page_area_c);
va = TLB_PHYS_TO_DIRECT(pa);
bzero((void *)(va + off), size);
} else {
+ PMAP_STATS_INC(pmap_nzero_page_area_oc);
va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
tp = tsb_kvtotte(va);
tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
@@ -1600,13 +1633,17 @@ pmap_zero_page_idle(vm_page_t m)
vm_offset_t va;
struct tte *tp;
+ PMAP_STATS_INC(pmap_nzero_page_idle);
pa = VM_PAGE_TO_PHYS(m);
- if (m->md.color == -1)
+ if (m->md.color == -1) {
+ PMAP_STATS_INC(pmap_nzero_page_idle_nc);
aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
- else if (m->md.color == DCACHE_COLOR(pa)) {
+ } else if (m->md.color == DCACHE_COLOR(pa)) {
+ PMAP_STATS_INC(pmap_nzero_page_idle_c);
va = TLB_PHYS_TO_DIRECT(pa);
bzero((void *)va, PAGE_SIZE);
} else {
+ PMAP_STATS_INC(pmap_nzero_page_idle_oc);
va = pmap_idle_map + (m->md.color * PAGE_SIZE);
tp = tsb_kvtotte(va);
tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
@@ -1625,21 +1662,26 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
vm_offset_t vsrc;
struct tte *tp;
+ PMAP_STATS_INC(pmap_ncopy_page);
pdst = VM_PAGE_TO_PHYS(mdst);
psrc = VM_PAGE_TO_PHYS(msrc);
- if (msrc->md.color == -1 && mdst->md.color == -1)
+ if (msrc->md.color == -1 && mdst->md.color == -1) {
+ PMAP_STATS_INC(pmap_ncopy_page_nc);
ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE);
- else if (msrc->md.color == DCACHE_COLOR(psrc) &&
+ } else if (msrc->md.color == DCACHE_COLOR(psrc) &&
mdst->md.color == DCACHE_COLOR(pdst)) {
+ PMAP_STATS_INC(pmap_ncopy_page_c);
vdst = TLB_PHYS_TO_DIRECT(pdst);
vsrc = TLB_PHYS_TO_DIRECT(psrc);
bcopy((void *)vsrc, (void *)vdst, PAGE_SIZE);
} else if (msrc->md.color == -1) {
if (mdst->md.color == DCACHE_COLOR(pdst)) {
+ PMAP_STATS_INC(pmap_ncopy_page_dc);
vdst = TLB_PHYS_TO_DIRECT(pdst);
ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
PAGE_SIZE);
} else {
+ PMAP_STATS_INC(pmap_ncopy_page_doc);
vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
tp = tsb_kvtotte(vdst);
tp->tte_data =
@@ -1651,10 +1693,12 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
}
} else if (mdst->md.color == -1) {
if (msrc->md.color == DCACHE_COLOR(psrc)) {
+ PMAP_STATS_INC(pmap_ncopy_page_sc);
vsrc = TLB_PHYS_TO_DIRECT(psrc);
ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
PAGE_SIZE);
} else {
+ PMAP_STATS_INC(pmap_ncopy_page_soc);
vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE);
tp = tsb_kvtotte(vsrc);
tp->tte_data =
@@ -1665,6 +1709,7 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
tlb_page_demap(kernel_pmap, vsrc);
}
} else {
+ PMAP_STATS_INC(pmap_ncopy_page_oc);
vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
tp = tsb_kvtotte(vdst);
tp->tte_data =
diff --git a/sys/sparc64/sparc64/tlb.c b/sys/sparc64/sparc64/tlb.c
index e242819..a827993 100644
--- a/sys/sparc64/sparc64/tlb.c
+++ b/sys/sparc64/sparc64/tlb.c
@@ -26,13 +26,17 @@
* $FreeBSD$
*/
+#include "opt_pmap.h"
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
+#include <sys/linker_set.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
+#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/pmap.h>
@@ -41,6 +45,10 @@
#include <machine/smp.h>
#include <machine/tlb.h>
+PMAP_STATS_VAR(tlb_ncontext_demap);
+PMAP_STATS_VAR(tlb_npage_demap);
+PMAP_STATS_VAR(tlb_nrange_demap);
+
int tlb_dtlb_entries;
int tlb_itlb_entries;
@@ -68,6 +76,7 @@ tlb_context_demap(struct pmap *pm)
* protect the target processor from entering the IPI handler with
* the lock held.
*/
+ PMAP_STATS_INC(tlb_ncontext_demap);
cookie = ipi_tlb_context_demap(pm);
if (pm->pm_active & PCPU_GET(cpumask)) {
KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
@@ -88,6 +97,7 @@ tlb_page_demap(struct pmap *pm, vm_offset_t va)
void *cookie;
u_long s;
+ PMAP_STATS_INC(tlb_npage_demap);
cookie = ipi_tlb_page_demap(pm, va);
if (pm->pm_active & PCPU_GET(cpumask)) {
KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
@@ -114,6 +124,7 @@ tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
u_long flags;
u_long s;
+ PMAP_STATS_INC(tlb_nrange_demap);
cookie = ipi_tlb_range_demap(pm, start, end);
if (pm->pm_active & PCPU_GET(cpumask)) {
KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
diff --git a/sys/sparc64/sparc64/tsb.c b/sys/sparc64/sparc64/tsb.c
index 2be5d97..2f2df1d 100644
--- a/sys/sparc64/sparc64/tsb.c
+++ b/sys/sparc64/sparc64/tsb.c
@@ -65,32 +65,12 @@
CTASSERT((1 << TTE_SHIFT) == sizeof(struct tte));
CTASSERT(TSB_BUCKET_MASK < (1 << 12));
-#ifdef PMAP_STATS
-static long tsb_nrepl;
-static long tsb_nlookup_k;
-static long tsb_nlookup_u;
-static long tsb_nenter_k;
-static long tsb_nenter_u;
-static long tsb_nforeach;
-
-SYSCTL_DECL(_debug_pmap_stats);
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, tsb_nrepl, CTLFLAG_RD, &tsb_nrepl, 0,
- "Number of TSB replacements");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, tsb_nlookup_k, CTLFLAG_RD,
- &tsb_nlookup_k, 0, "Number of calls to tsb_tte_lookup(), kernel pmap");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, tsb_nlookup_u, CTLFLAG_RD,
- &tsb_nlookup_u, 0, "Number of calls to tsb_tte_lookup(), user pmap");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, tsb_nenter_k, CTLFLAG_RD,
- &tsb_nenter_k, 0, "Number of calls to tsb_tte_enter()");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, tsb_nenter_u, CTLFLAG_RD,
- &tsb_nenter_u, 0, "Number of calls to tsb_tte_enter()");
-SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, tsb_nforeach, CTLFLAG_RD,
- &tsb_nforeach, 0, "Number of calls to tsb_foreach()");
-
-#define TSB_STATS_INC(var) atomic_add_long(&var, 1)
-#else
-#define TSB_STATS_INC(var)
-#endif
+PMAP_STATS_VAR(tsb_nrepl);
+PMAP_STATS_VAR(tsb_nlookup_k);
+PMAP_STATS_VAR(tsb_nlookup_u);
+PMAP_STATS_VAR(tsb_nenter_k);
+PMAP_STATS_VAR(tsb_nenter_u);
+PMAP_STATS_VAR(tsb_nforeach);
struct tte *tsb_kernel;
vm_size_t tsb_kernel_mask;
@@ -106,12 +86,12 @@ tsb_tte_lookup(pmap_t pm, vm_offset_t va)
u_int i;
if (pm == kernel_pmap) {
- TSB_STATS_INC(tsb_nlookup_k);
+ PMAP_STATS_INC(tsb_nlookup_k);
tp = tsb_kvtotte(va);
if (tte_match(tp, va))
return (tp);
} else {
- TSB_STATS_INC(tsb_nlookup_u);
+ PMAP_STATS_INC(tsb_nlookup_u);
for (sz = TS_MIN; sz <= TS_MAX; sz++) {
bucket = tsb_vtobucket(pm, sz, va);
for (i = 0; i < TSB_BUCKET_SIZE; i++) {
@@ -135,14 +115,14 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long sz, u_long data)
int i;
if (pm == kernel_pmap) {
- TSB_STATS_INC(tsb_nenter_k);
+ PMAP_STATS_INC(tsb_nenter_k);
tp = tsb_kvtotte(va);
KASSERT((tp->tte_data & TD_V) == 0,
("tsb_tte_enter: replacing valid kernel mapping"));
goto enter;
}
+ PMAP_STATS_INC(tsb_nenter_u);
- TSB_STATS_INC(tsb_nenter_u);
bucket = tsb_vtobucket(pm, sz, va);
tp = NULL;
@@ -165,7 +145,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long sz, u_long data)
if (tp == NULL)
tp = rtp;
if ((tp->tte_data & TD_V) != 0) {
- TSB_STATS_INC(tsb_nrepl);
+ PMAP_STATS_INC(tsb_nrepl);
ova = TTE_GET_VA(tp);
vm_page_lock_queues();
pmap_remove_tte(pm, NULL, tp, ova);
@@ -204,7 +184,7 @@ tsb_foreach(pmap_t pm1, pmap_t pm2, vm_offset_t start, vm_offset_t end,
struct tte *tp;
int i;
- TSB_STATS_INC(tsb_nforeach);
+ PMAP_STATS_INC(tsb_nforeach);
for (i = 0; i < TSB_SIZE; i++) {
tp = &pm1->pm_tsb[i];
if ((tp->tte_data & TD_V) != 0) {
diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c
index 548fceb..344d7c5 100644
--- a/sys/sparc64/sparc64/vm_machdep.c
+++ b/sys/sparc64/sparc64/vm_machdep.c
@@ -43,12 +43,16 @@
* $FreeBSD$
*/
+#include "opt_pmap.h"
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
+#include <sys/linker_set.h>
+#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vmmeter.h>
@@ -73,6 +77,10 @@
#include <machine/tlb.h>
#include <machine/tstate.h>
+PMAP_STATS_VAR(uma_nsmall_alloc);
+PMAP_STATS_VAR(uma_nsmall_alloc_oc);
+PMAP_STATS_VAR(uma_nsmall_free);
+
void
cpu_exit(struct thread *td)
{
@@ -310,6 +318,8 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
int pflags;
void *va;
+ PMAP_STATS_INC(uma_nsmall_alloc);
+
*flags = UMA_SLAB_PRIV;
if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
@@ -327,6 +337,7 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
if (m->md.color != DCACHE_COLOR(pa)) {
KASSERT(m->md.colors[0] == 0 && m->md.colors[1] == 0,
("uma_small_alloc: free page still has mappings!"));
+ PMAP_STATS_INC(uma_nsmall_alloc_oc);
m->md.color = DCACHE_COLOR(pa);
dcache_page_inval(pa);
}
@@ -344,6 +355,7 @@ uma_small_free(void *mem, int size, u_int8_t flags)
{
vm_page_t m;
+ PMAP_STATS_INC(uma_nsmall_free);
m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem));
vm_page_lock_queues();
vm_page_free(m);