summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    cperciva <cperciva@FreeBSD.org>  2010-11-20 20:04:29 +0000
committer cperciva <cperciva@FreeBSD.org>  2010-11-20 20:04:29 +0000
commit    b6354cc05660d9d9665630f7b2fbf1b8f3a63c94 (patch)
tree      7a2116fa846dae677377a526b8777178a390f377
parent    70881bc10bbea544ba7d7d89723de0868bd07916 (diff)
download  FreeBSD-src-b6354cc05660d9d9665630f7b2fbf1b8f3a63c94.zip
          FreeBSD-src-b6354cc05660d9d9665630f7b2fbf1b8f3a63c94.tar.gz
Add VTOM(va) macro as xpmap_ptom(VTOP(va)) to convert to machine addresses.
Clean up the code by converting xpmap_ptom(VTOP(...)) to VTOM(...) and converting xpmap_ptom(VM_PAGE_TO_PHYS(...)) to VM_PAGE_TO_MACH(...). In a few places we take advantage of the fact that xpmap_ptom commutes with setting PG_* flags. This commit should have no net effect save to improve the readability of this code.
-rw-r--r--  sys/i386/include/pmap.h      2
-rw-r--r--  sys/i386/xen/mp_machdep.c   11
-rw-r--r--  sys/i386/xen/pmap.c         30
-rw-r--r--  sys/i386/xen/xen_machdep.c  26
4 files changed, 34 insertions(+), 35 deletions(-)
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index e62e989..7b664e5 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -221,6 +221,8 @@ extern pt_entry_t pg_nx;
#define MACH_TO_VM_PAGE(ma) PHYS_TO_VM_PAGE(xpmap_mtop((ma)))
#define VM_PAGE_TO_MACH(m) xpmap_ptom(VM_PAGE_TO_PHYS((m)))
+#define VTOM(va) xpmap_ptom(VTOP(va))
+
static __inline vm_paddr_t
pmap_kextract_ma(vm_offset_t va)
{
diff --git a/sys/i386/xen/mp_machdep.c b/sys/i386/xen/mp_machdep.c
index 60165cb..8edbb7f 100644
--- a/sys/i386/xen/mp_machdep.c
+++ b/sys/i386/xen/mp_machdep.c
@@ -749,7 +749,7 @@ start_all_aps(void)
gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
- PT_SET_MA(bootAPgdt, xpmap_ptom(VTOP(bootAPgdt)) | PG_V | PG_RW);
+ PT_SET_MA(bootAPgdt, VTOM(bootAPgdt) | PG_V | PG_RW);
bzero(bootAPgdt, PAGE_SIZE);
for (x = 0; x < NGDT; x++)
ssdtosd(&gdt_segs[x], &bootAPgdt[x].sd);
@@ -833,14 +833,13 @@ cpu_initialize_context(unsigned int cpu)
}
boot_stack = kmem_alloc_nofault(kernel_map, 1);
newPTD = kmem_alloc_nofault(kernel_map, NPGPTD);
- ma[0] = xpmap_ptom(VM_PAGE_TO_PHYS(m[0]))|PG_V;
+ ma[0] = VM_PAGE_TO_MACH(m[0])|PG_V;
#ifdef PAE
pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
for (i = 0; i < NPGPTD; i++) {
((vm_paddr_t *)boot_stack)[i] =
- ma[i] =
- xpmap_ptom(VM_PAGE_TO_PHYS(m[i]))|PG_V;
+ ma[i] = VM_PAGE_TO_MACH(m[i])|PG_V;
}
#endif
@@ -862,7 +861,7 @@ cpu_initialize_context(unsigned int cpu)
pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD]));
- xen_pgdpt_pin(xpmap_ptom(VM_PAGE_TO_PHYS(m[NPGPTD + 1])));
+ xen_pgdpt_pin(VM_PAGE_TO_MACH(m[NPGPTD + 1]));
vm_page_lock_queues();
for (i = 0; i < 4; i++) {
int pdir = (PTDPTDI + i) / NPDEPG;
@@ -905,7 +904,7 @@ cpu_initialize_context(unsigned int cpu)
ctxt.failsafe_callback_cs = GSEL(GCODE_SEL, SEL_KPL);
ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
- ctxt.ctrlreg[3] = xpmap_ptom(VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
+ ctxt.ctrlreg[3] = VM_PAGE_TO_MACH(m[NPGPTD + 1]);
#else /* __x86_64__ */
ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 0efdc40..05f2217 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -1355,7 +1355,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
pte = vtopte(sva);
endpte = pte + count;
while (pte < endpte) {
- pa = xpmap_ptom(VM_PAGE_TO_PHYS(*ma)) | pgeflag | PG_RW | PG_V | PG_M | PG_A;
+ pa = VM_PAGE_TO_MACH(*ma) | pgeflag | PG_RW | PG_V | PG_M | PG_A;
mclp->op = __HYPERVISOR_update_va_mapping;
mclp->args[0] = va;
@@ -1589,7 +1589,7 @@ pmap_pinit(pmap_t pmap)
for (i = 0; i < NPGPTD; i++) {
vm_paddr_t ma;
- ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
+ ma = VM_PAGE_TO_MACH(ptdpg[i]);
pmap->pm_pdpt[i] = ma | PG_V;
}
@@ -1599,7 +1599,7 @@ pmap_pinit(pmap_t pmap)
pt_entry_t *pd;
vm_paddr_t ma;
- ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
+ ma = VM_PAGE_TO_MACH(ptdpg[i]);
pd = pmap->pm_pdir + (i * NPDEPG);
PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW));
#if 0
@@ -1612,9 +1612,9 @@ pmap_pinit(pmap_t pmap)
#endif
vm_page_lock_queues();
xen_flush_queue();
- xen_pgdpt_pin(xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD])));
+ xen_pgdpt_pin(VM_PAGE_TO_MACH(ptdpg[NPGPTD]));
for (i = 0; i < NPGPTD; i++) {
- vm_paddr_t ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
+ vm_paddr_t ma = VM_PAGE_TO_MACH(ptdpg[i]);
PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE);
}
xen_flush_queue();
@@ -1669,7 +1669,7 @@ _pmap_allocpte(pmap_t pmap, unsigned int ptepindex, int flags)
*/
pmap->pm_stats.resident_count++;
- ptema = xpmap_ptom(VM_PAGE_TO_PHYS(m));
+ ptema = VM_PAGE_TO_MACH(m);
xen_pt_pin(ptema);
PT_SET_VA_MA(&pmap->pm_pdir[ptepindex],
(ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
@@ -1873,7 +1873,7 @@ pmap_release(pmap_t pmap)
for (i = 0; i < npgptd; i++) {
m = ptdpg[i];
- ma = xpmap_ptom(VM_PAGE_TO_PHYS(m));
+ ma = VM_PAGE_TO_MACH(m);
/* unpinning L1 and L2 treated the same */
#if 0
xen_pgd_unpin(ma);
@@ -1883,7 +1883,7 @@ pmap_release(pmap_t pmap)
#endif
#ifdef PAE
if (i < NPGPTD)
- KASSERT(xpmap_ptom(VM_PAGE_TO_PHYS(m)) == (pmap->pm_pdpt[i] & PG_FRAME),
+ KASSERT(VM_PAGE_TO_MACH(m) == (pmap->pm_pdpt[i] & PG_FRAME),
("pmap_release: got wrong ptd page"));
#endif
m->wire_count--;
@@ -2673,7 +2673,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
boolean_t invlva;
CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
- pmap, va, access, xpmap_ptom(VM_PAGE_TO_PHYS(m)), prot, wired);
+ pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired);
va = trunc_page(va);
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
@@ -3364,7 +3364,7 @@ pmap_zero_page(vm_page_t m)
if (*sysmaps->CMAP2)
panic("pmap_zero_page: CMAP2 busy");
sched_pin();
- PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M);
+ PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M);
pagezero(sysmaps->CADDR2);
PT_SET_MA(sysmaps->CADDR2, 0);
sched_unpin();
@@ -3387,7 +3387,7 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
if (*sysmaps->CMAP2)
panic("pmap_zero_page: CMAP2 busy");
sched_pin();
- PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M);
+ PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M);
if (off == 0 && size == PAGE_SIZE)
pagezero(sysmaps->CADDR2);
@@ -3411,7 +3411,7 @@ pmap_zero_page_idle(vm_page_t m)
if (*CMAP3)
panic("pmap_zero_page: CMAP3 busy");
sched_pin();
- PT_SET_MA(CADDR3, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M);
+ PT_SET_MA(CADDR3, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M);
pagezero(CADDR3);
PT_SET_MA(CADDR3, 0);
sched_unpin();
@@ -3435,8 +3435,8 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
if (*sysmaps->CMAP2)
panic("pmap_copy_page: CMAP2 busy");
sched_pin();
- PT_SET_MA(sysmaps->CADDR1, PG_V | xpmap_ptom(VM_PAGE_TO_PHYS(src)) | PG_A);
- PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(dst)) | PG_A | PG_M);
+ PT_SET_MA(sysmaps->CADDR1, PG_V | VM_PAGE_TO_MACH(src) | PG_A);
+ PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(dst) | PG_A | PG_M);
bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
PT_SET_MA(sysmaps->CADDR1, 0);
PT_SET_MA(sysmaps->CADDR2, 0);
@@ -4063,7 +4063,7 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
panic("pmap_page_set_memattr: CMAP2 busy");
sched_pin();
PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
- xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M |
+ VM_PAGE_TO_MACH(m) | PG_A | PG_M |
pmap_cache_bits(m->md.pat_mode, 0));
invlcaddr(sysmaps->CADDR2);
sva = (vm_offset_t)sysmaps->CADDR2;
diff --git a/sys/i386/xen/xen_machdep.c b/sys/i386/xen/xen_machdep.c
index 542f4df..aaad5d4 100644
--- a/sys/i386/xen/xen_machdep.c
+++ b/sys/i386/xen/xen_machdep.c
@@ -917,7 +917,7 @@ initvalues(start_info_t *startinfo)
l3_pages = 1;
l2_pages = 0;
IdlePDPT = (pd_entry_t *)startinfo->pt_base;
- IdlePDPTma = xpmap_ptom(VTOP(startinfo->pt_base));
+ IdlePDPTma = VTOM(startinfo->pt_base);
for (i = (KERNBASE >> 30);
(i < 4) && (IdlePDPT[i] != 0); i++)
l2_pages++;
@@ -926,7 +926,7 @@ initvalues(start_info_t *startinfo)
* Thus, if KERNBASE
*/
for (i = 0; i < l2_pages; i++)
- IdlePTDma[i] = xpmap_ptom(VTOP(IdlePTD + i*PAGE_SIZE));
+ IdlePTDma[i] = VTOM(IdlePTD + i*PAGE_SIZE);
l2_pages = (l2_pages == 0) ? 1 : l2_pages;
#else
@@ -966,13 +966,12 @@ initvalues(start_info_t *startinfo)
IdlePDPTnew = (pd_entry_t *)cur_space; cur_space += PAGE_SIZE;
bzero(IdlePDPTnew, PAGE_SIZE);
- IdlePDPTnewma = xpmap_ptom(VTOP(IdlePDPTnew));
+ IdlePDPTnewma = VTOM(IdlePDPTnew);
IdlePTDnew = (pd_entry_t *)cur_space; cur_space += 4*PAGE_SIZE;
bzero(IdlePTDnew, 4*PAGE_SIZE);
for (i = 0; i < 4; i++)
- IdlePTDnewma[i] =
- xpmap_ptom(VTOP((uint8_t *)IdlePTDnew + i*PAGE_SIZE));
+ IdlePTDnewma[i] = VTOM((uint8_t *)IdlePTDnew + i*PAGE_SIZE);
/*
* L3
*
@@ -1040,7 +1039,7 @@ initvalues(start_info_t *startinfo)
IdlePTDnewma[i] | PG_V);
}
xen_load_cr3(VTOP(IdlePDPTnew));
- xen_pgdpt_pin(xpmap_ptom(VTOP(IdlePDPTnew)));
+ xen_pgdpt_pin(VTOM(IdlePDPTnew));
/* allocate remainder of nkpt pages */
cur_space_pt = cur_space;
@@ -1055,14 +1054,13 @@ initvalues(start_info_t *startinfo)
* make sure that all the initial page table pages
* have been zeroed
*/
- PT_SET_MA(cur_space,
- xpmap_ptom(VTOP(cur_space)) | PG_V | PG_RW);
+ PT_SET_MA(cur_space, VTOM(cur_space) | PG_V | PG_RW);
bzero((char *)cur_space, PAGE_SIZE);
PT_SET_MA(cur_space, (vm_paddr_t)0);
- xen_pt_pin(xpmap_ptom(VTOP(cur_space)));
+ xen_pt_pin(VTOM(cur_space));
xen_queue_pt_update((vm_paddr_t)(IdlePTDnewma[pdir] +
curoffset*sizeof(vm_paddr_t)),
- xpmap_ptom(VTOP(cur_space)) | PG_KERNEL);
+ VTOM(cur_space) | PG_KERNEL);
PT_UPDATES_FLUSH();
}
@@ -1113,14 +1111,14 @@ initvalues(start_info_t *startinfo)
#if 0
/* add page table for KERNBASE */
xen_queue_pt_update(IdlePTDma + KPTDI*sizeof(vm_paddr_t),
- xpmap_ptom(VTOP(cur_space) | PG_KERNEL));
+ VTOM(cur_space) | PG_KERNEL);
xen_flush_queue();
#ifdef PAE
xen_queue_pt_update(pdir_shadow_ma[3] + KPTDI*sizeof(vm_paddr_t),
- xpmap_ptom(VTOP(cur_space) | PG_V | PG_A));
+ VTOM(cur_space) | PG_V | PG_A);
#else
xen_queue_pt_update(pdir_shadow_ma + KPTDI*sizeof(vm_paddr_t),
- xpmap_ptom(VTOP(cur_space) | PG_V | PG_A));
+ VTOM(cur_space) | PG_V | PG_A);
#endif
xen_flush_queue();
cur_space += PAGE_SIZE;
@@ -1140,7 +1138,7 @@ initvalues(start_info_t *startinfo)
*/
for (i = (((vm_offset_t)&btext) & ~PAGE_MASK);
i < (((vm_offset_t)&etext) & ~PAGE_MASK); i += PAGE_SIZE)
- PT_SET_MA(i, xpmap_ptom(VTOP(i)) | PG_V | PG_A);
+ PT_SET_MA(i, VTOM(i) | PG_V | PG_A);
printk("#7\n");
physfree = VTOP(cur_space);
OpenPOWER on IntegriCloud