author     marcel <marcel@FreeBSD.org>    2008-08-31 20:08:13 +0000
committer  marcel <marcel@FreeBSD.org>    2008-08-31 20:08:13 +0000
commit     970ea61218d4570229e808551ed511961aed400e (patch)
tree       63bc40a26d5c81d7c6e600b8dc4640d5fd41ae6a /sys/powerpc
parent     3e4620e0519ed30816fed26ced1fc802ced55e7c (diff)
Trace all PMAP calls using KTR_PMAP.
Diffstat (limited to 'sys/powerpc')
-rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c | 94
1 file changed, 92 insertions(+), 2 deletions(-)
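
The hunks below all follow one pattern: each pmap(9) entry point gains a CTRn() call (from <sys/ktr.h>) that records the function name and its arguments under the KTR_PMAP trace class before dispatching to the machine-dependent MMU method (the lone "+" lines add the blank line that FreeBSD style(9) expects at the top of a function body with no local declarations). A minimal sketch of that pattern follows; the pmap_example() routine and MMU_EXAMPLE() dispatch macro are hypothetical and not part of this change.

#include <sys/param.h>
#include <sys/ktr.h>

void
pmap_example(pmap_t pmap, vm_offset_t va)
{

	/*
	 * Log the call and its arguments into the kernel's KTR ring
	 * buffer.  CTR3 takes a class mask, a format string, and three
	 * arguments; it compiles to nothing unless the kernel is built
	 * with KTR support and KTR_PMAP is in the compiled-in class set.
	 */
	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	MMU_EXAMPLE(mmu_obj, pmap, va);
}
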
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 7e0c4ff..55112bb 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
+#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/systm.h>
@@ -78,18 +79,24 @@ int pmap_bootstrapped;
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
+
+ CTR4(KTR_PMAP, "%s(%p, %#x, %u)", __func__, pmap, va, wired);
MMU_CHANGE_WIRING(mmu_obj, pmap, va, wired);
}
void
pmap_clear_modify(vm_page_t m)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_CLEAR_MODIFY(mmu_obj, m);
}
void
pmap_clear_reference(vm_page_t m)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_CLEAR_REFERENCE(mmu_obj, m);
}
@@ -97,12 +104,17 @@ void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
vm_size_t len, vm_offset_t src_addr)
{
+
+ CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
+ src_pmap, dst_addr, len, src_addr);
MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
+
+ CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
MMU_COPY_PAGE(mmu_obj, src, dst);
}
@@ -110,6 +122,9 @@ void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
vm_prot_t prot, boolean_t wired)
{
+
+ CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %#x, %p, %#x, %u)", pmap, va,
+ access, p, prot, wired);
MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
}
@@ -117,60 +132,82 @@ void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m_start, vm_prot_t prot)
{
+
+ CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
+ end, m_start, prot);
MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
+
+ CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
+
+ CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
return (MMU_EXTRACT(mmu_obj, pmap, va));
}
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
+
+ CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}
void
pmap_growkernel(vm_offset_t va)
{
+
+ CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
MMU_GROWKERNEL(mmu_obj, va);
}
void
pmap_init(void)
{
+
+ CTR1(KTR_PMAP, "%s()", __func__);
MMU_INIT(mmu_obj);
}
boolean_t
pmap_is_modified(vm_page_t m)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
return (MMU_IS_MODIFIED(mmu_obj, m));
}
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{
+
+ CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}
boolean_t
pmap_ts_referenced(vm_page_t m)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
return (MMU_TS_REFERENCED(mmu_obj, m));
}
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
+
+ CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
+ prot);
return (MMU_MAP(mmu_obj, virt, start, end, prot));
}
@@ -178,18 +215,25 @@ void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
+
+ CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
+ object, pindex, size);
MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
+
+ CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}
void
pmap_page_init(vm_page_t m)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_PAGE_INIT(mmu_obj, m);
}
@@ -197,12 +241,15 @@ int
pmap_page_wired_mappings(vm_page_t m)
{
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}
int
pmap_pinit(pmap_t pmap)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
MMU_PINIT(mmu_obj, pmap);
return (1);
}
@@ -210,90 +257,121 @@ pmap_pinit(pmap_t pmap)
void
pmap_pinit0(pmap_t pmap)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
MMU_PINIT0(mmu_obj, pmap);
}
void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
+
+ CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
+ prot);
MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}
void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{
+
+ CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
MMU_QENTER(mmu_obj, start, m, count);
}
void
pmap_qremove(vm_offset_t start, int count)
{
+
+ CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
MMU_QREMOVE(mmu_obj, start, count);
}
void
pmap_release(pmap_t pmap)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
MMU_RELEASE(mmu_obj, pmap);
}
void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{
+
+ CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
MMU_REMOVE(mmu_obj, pmap, start, end);
}
void
pmap_remove_all(vm_page_t m)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_REMOVE_ALL(mmu_obj, m);
}
void
pmap_remove_pages(pmap_t pmap)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
MMU_REMOVE_PAGES(mmu_obj, pmap);
}
void
pmap_remove_write(vm_page_t m)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_REMOVE_WRITE(mmu_obj, m);
}
void
pmap_zero_page(vm_page_t m)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_ZERO_PAGE(mmu_obj, m);
}
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
+
+ CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}
void
pmap_zero_page_idle(vm_page_t m)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_ZERO_PAGE_IDLE(mmu_obj, m);
}
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
+
+ CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
return (MMU_MINCORE(mmu_obj, pmap, addr));
}
void
pmap_activate(struct thread *td)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, td);
MMU_ACTIVATE(mmu_obj, td);
}
void
pmap_deactivate(struct thread *td)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, td);
MMU_DEACTIVATE(mmu_obj, td);
}
@@ -306,11 +384,11 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
vm_offset_t *addr, vm_size_t size)
{
+ CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
+ size);
MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}
-
-
/*
* Routines used in machine-dependent code
*/
@@ -332,36 +410,48 @@ pmap_bootstrap(vm_offset_t start, vm_offset_t end)
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
+
+ CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
return (MMU_MAPDEV(mmu_obj, pa, size));
}
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
+
+ CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
MMU_UNMAPDEV(mmu_obj, va, size);
}
vm_offset_t
pmap_kextract(vm_offset_t va)
{
+
+ CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
return (MMU_KEXTRACT(mmu_obj, va));
}
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
+
+ CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
MMU_KENTER(mmu_obj, va, pa);
}
boolean_t
pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
{
+
+ CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}
boolean_t
pmap_page_executable(vm_page_t pg)
{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, pg);
return (MMU_PAGE_EXECUTABLE(mmu_obj, pg));
}
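
These trace points are inert unless the kernel is built with KTR support. A hedged sketch of the kernel configuration typically used to capture them (see ktr(4) and ktrdump(8) for the authoritative option list):

options 	KTR
options 	KTR_ENTRIES=16384
options 	KTR_COMPILE=(KTR_PMAP)
options 	KTR_MASK=(KTR_PMAP)

With such a kernel, the recorded entries can be dumped from a running system with ktrdump(8), for example ktrdump -c -t, and the enabled classes can be adjusted at run time through the debug.ktr.mask sysctl.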