author    Jeremy Fitzhardinge <jeremy@goop.org>    2008-01-30 13:33:39 +0100
committer Ingo Molnar <mingo@elte.hu>              2008-01-30 13:33:39 +0100
commit    1c70e9bd832642b712181e32d1bbf2436058a3df (patch)
tree      3e9a57b7ae9d0357bb9727ac4ed9f77a3eb88d52
parent    a5a19c63f4e55e32dc0bc3d936d7f94793d8b380 (diff)
xen: deal with pmd being allocated/freed
Deal properly with pmd-level pages being allocated and freed
dynamically.  We can handle them more or less the same as pte pages.

Also, deal with early_ioremap pagetable manipulations.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--    arch/x86/xen/enlighten.c    30
1 file changed, 25 insertions(+), 5 deletions(-)
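For context, pin_pagetable_pfn(), whose body falls outside the hunks shown below, wraps a single MMUEXT hypercall asking Xen to pin or unpin one pagetable page; the patch generalizes its call site so pmd (L2) pages can be pinned the same way as pte (L1) pages. A minimal sketch of its likely shape at this point in the tree (reconstructed for illustration; not part of this diff):

	/* Sketch only, reconstructed for illustration: issue one MMUEXT
	 * operation to the hypervisor.  'level' is a command such as
	 * MMUEXT_PIN_L1_TABLE, MMUEXT_PIN_L2_TABLE or MMUEXT_UNPIN_TABLE;
	 * the pseudo-physical frame is translated to a machine frame
	 * before the call. */
	static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
	{
		struct mmuext_op op;

		op.cmd = level;
		op.arg1.mfn = pfn_to_mfn(pfn);
		if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
			BUG();
	}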
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 845b4fd..de647bc 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -658,6 +658,13 @@ static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}
+/* Early release_pt assumes that all pts are pinned, since there's
+ only init_mm and anything attached to that is pinned. */
+static void xen_release_pt_init(u32 pfn)
+{
+ make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+}
+
static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
{
struct mmuext_op op;
@@ -669,7 +676,7 @@ static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
/* This needs to make sure the new pte page is pinned iff it's being
attached to a pinned pagetable. */
-static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
+static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
{
struct page *page = pfn_to_page(pfn);
@@ -678,7 +685,7 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
if (!PageHighMem(page)) {
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
- pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+ pin_pagetable_pfn(level, pfn);
} else
/* make sure there are no stray mappings of
this page */
@@ -686,6 +693,16 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
}
}
+static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
+{
+ xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L1_TABLE);
+}
+
+static void xen_alloc_pd(struct mm_struct *mm, u32 pfn)
+{
+ xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L2_TABLE);
+}
+
/* This should never happen until we're OK to use struct page */
static void xen_release_pt(u32 pfn)
{
@@ -788,6 +805,9 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
/* This will work as long as patching hasn't happened yet
(which it hasn't) */
pv_mmu_ops.alloc_pt = xen_alloc_pt;
+ pv_mmu_ops.alloc_pd = xen_alloc_pd;
+ pv_mmu_ops.release_pt = xen_release_pt;
+ pv_mmu_ops.release_pd = xen_release_pt;
pv_mmu_ops.set_pte = xen_set_pte;
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1011,10 +1031,10 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
.pte_update_defer = paravirt_nop,
.alloc_pt = xen_alloc_pt_init,
- .release_pt = xen_release_pt,
- .alloc_pd = paravirt_nop,
+ .release_pt = xen_release_pt_init,
+ .alloc_pd = xen_alloc_pt_init,
.alloc_pd_clone = paravirt_nop,
- .release_pd = paravirt_nop,
+ .release_pd = xen_release_pt_init,
#ifdef CONFIG_HIGHPTE
.kmap_atomic_pte = xen_kmap_atomic_pte,
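Similarly, the body of xen_release_pt(), also outside the hunks above, mirrors the alloc path in reverse: unpin the page with the hypervisor, then restore its normal read-write mapping. A hedged sketch of that shape (reconstructed for illustration; not part of this diff):

	/* Sketch only, not part of the diff above.  Releasing a pagetable
	 * page is the inverse of xen_alloc_ptpage(): unpin it, then make
	 * it writable again.  Highmem pages were never remapped read-only,
	 * so they need no work here. */
	static void xen_release_pt(u32 pfn)
	{
		struct page *page = pfn_to_page(pfn);

		if (!PageHighMem(page)) {
			pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
	}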