author | Jeremy Fitzhardinge <jeremy@xensource.com> | 2007-07-17 18:37:03 -0700
---|---|---
committer | Jeremy Fitzhardinge <jeremy@goop.org> | 2007-07-18 08:47:40 -0700
commit | fdb4c338c8d1d494e17c3422a3ea2129f6791596 (patch) |
tree | 08305dcbbf4179299f5d001796d658ff08ecc014 |
parent | 810bab448e563ffd1718d78e9a3756806b626acc (diff) |
paravirt: add an "mm" argument to alloc_pt
It's useful to know which mm is allocating a pagetable. Xen uses this
to determine whether the pagetable being added to is pinned or not.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
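
For context, here is a minimal sketch, not taken from this patch, of how a hypervisor backend's alloc_pt hook could consume the new mm argument; the helpers example_pgd_is_pinned() and example_make_pte_readonly() are hypothetical names standing in for whatever pin-tracking a real port keeps.

	/*
	 * Hypothetical hook, for illustration only: with mm available, the
	 * backend can check whether the pagetable this PTE page will be
	 * inserted into is pinned and prepare the page accordingly.
	 * example_pgd_is_pinned() and example_make_pte_readonly() are
	 * made-up helpers, not functions added by this patch.
	 */
	static void example_alloc_pt(struct mm_struct *mm, u32 pfn)
	{
		if (example_pgd_is_pinned(mm->pgd))
			example_make_pte_readonly(pfn);
	}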
-rw-r--r-- | arch/i386/kernel/vmi.c | 2
-rw-r--r-- | arch/i386/mm/init.c | 2
-rw-r--r-- | arch/i386/mm/pageattr.c | 2
-rw-r--r-- | include/asm-i386/paravirt.h | 6
-rw-r--r-- | include/asm-i386/pgalloc.h | 6
5 files changed, 9 insertions, 9 deletions
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index c12720d..234bd6f 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -362,7 +362,7 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
 }
 #endif
 
-static void vmi_allocate_pt(u32 pfn)
+static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
 {
 	vmi_set_page_type(pfn, VMI_PAGE_L1);
 	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7135946..f9b6a88 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -87,7 +87,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
 		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 
-		paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
+		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
 		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
 	}
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 2eb14a7..37992ff 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -60,7 +60,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
-	paravirt_alloc_pt(page_to_pfn(base));
+	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
 		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
 					   addr == address ? prot : ref_prot));
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 7f846a7..99bf661 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -173,7 +173,7 @@ struct paravirt_ops
 			 unsigned long va);
 
 	/* Hooks for allocating/releasing pagetable pages */
-	void (*alloc_pt)(u32 pfn);
+	void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
 	void (*alloc_pd)(u32 pfn);
 	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
 	void (*release_pt)(u32 pfn);
@@ -725,9 +725,9 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
 }
 
-static inline void paravirt_alloc_pt(unsigned pfn)
+static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL1(alloc_pt, pfn);
+	PVOP_VCALL2(alloc_pt, mm, pfn);
 }
 static inline void paravirt_release_pt(unsigned pfn)
 {
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index d07b7af..f2fc33c 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -7,7 +7,7 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define paravirt_alloc_pt(pfn) do { } while (0)
+#define paravirt_alloc_pt(mm, pfn) do { } while (0)
 #define paravirt_alloc_pd(pfn) do { } while (0)
 #define paravirt_alloc_pd(pfn) do { } while (0)
 #define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
@@ -17,13 +17,13 @@
 
 #define pmd_populate_kernel(mm, pmd, pte)	\
 do {						\
-	paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT);	\
+	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);	\
 	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));	\
 } while (0)
 
 #define pmd_populate(mm, pmd, pte)		\
 do {						\
-	paravirt_alloc_pt(page_to_pfn(pte));	\
+	paravirt_alloc_pt(mm, page_to_pfn(pte));	\
 	set_pmd(pmd, __pmd(_PAGE_TABLE +	\
 		((unsigned long long)page_to_pfn(pte) <<	\
 			(unsigned long long) PAGE_SHIFT)));	\
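
The resulting call pattern, restated as a brief usage sketch drawn from the hunks above (illustrative only, not additional code from the patch): kernel-only pagetable pages are attributed to init_mm explicitly, while the pmd_populate macros simply forward the mm they already receive.

	/* kernel pagetable page (boot-time or split_large_page): owner is init_mm */
	paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);

	/* process pagetable page: pmd_populate(mm, pmd, pte) forwards its mm */
	paravirt_alloc_pt(mm, page_to_pfn(pte));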