author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-02-04 09:16:03 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-02-04 09:16:03 -0800
commit | d2fc0bacd5c438cb459fdf531eff00ab18422a00 (patch)
tree | d0ea52e4d2ad2fac12e19eaf6891c6af98353cfc /arch/x86/mm/pgtable_32.c
parent | 93890b71a34f9490673a6edd56b61c2124215e46 (diff)
parent | 795d45b22c079946332bf3825afefe5a981a97b6 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (78 commits)
x86: fix RTC lockdep warning: potential hardirq recursion
x86: cpa, micro-optimization
x86: cpa, clean up code flow
x86: cpa, eliminate CPA_ enum
x86: cpa, cleanups
x86: implement gbpages support in change_page_attr()
x86: support gbpages in pagetable dump
x86: add gbpages support to lookup_address
x86: add pgtable accessor functions for gbpages
x86: add PUD_PAGE_SIZE
x86: add feature macros for the gbpages cpuid bit
x86: switch direct mapping setup over to set_pte
x86: fix page-present check in cpa_flush_range
x86: remove cpa warning
x86: remove now unused clear_kernel_mapping
x86: switch pci-gart over to using set_memory_np() instead of clear_kernel_mapping()
x86: cpa selftest, skip non present entries
x86: CPA fix pagetable split
x86: rename LARGE_PAGE_SIZE to PMD_PAGE_SIZE
x86: cpa, fix lookup_address
...
Diffstat (limited to 'arch/x86/mm/pgtable_32.c')
-rw-r--r-- | arch/x86/mm/pgtable_32.c | 61
1 file changed, 20 insertions, 41 deletions
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index cb3aa47..c7db504 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -219,50 +219,39 @@ static inline void pgd_list_del(pgd_t *pgd)
 	list_del(&page->lru);
 }
 
+#define UNSHARED_PTRS_PER_PGD				\
+	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
 
-
-#if (PTRS_PER_PMD == 1)
-/* Non-PAE pgd constructor */
-static void pgd_ctor(void *pgd)
+static void pgd_ctor(void *p)
 {
+	pgd_t *pgd = p;
 	unsigned long flags;
 
-	/* !PAE, no pagetable sharing */
+	/* Clear usermode parts of PGD */
 	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 
 	spin_lock_irqsave(&pgd_lock, flags);
 
-	/* must happen under lock */
-	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
-			swapper_pg_dir + USER_PTRS_PER_PGD,
-			KERNEL_PGD_PTRS);
-	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-				__pa(swapper_pg_dir) >> PAGE_SHIFT,
-				USER_PTRS_PER_PGD,
-				KERNEL_PGD_PTRS);
-	pgd_list_add(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-}
-#else  /* PTRS_PER_PMD > 1 */
-/* PAE pgd constructor */
-static void pgd_ctor(void *pgd)
-{
-	/* PAE, kernel PMD may be shared */
-
-	if (SHARED_KERNEL_PMD) {
-		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+	/* If the pgd points to a shared pagetable level (either the
+	   ptes in non-PAE, or shared PMD in PAE), then just copy the
+	   references from swapper_pg_dir. */
+	if (PAGETABLE_LEVELS == 2 ||
+	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
+		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 				swapper_pg_dir + USER_PTRS_PER_PGD,
 				KERNEL_PGD_PTRS);
-	} else {
-		unsigned long flags;
+		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
+					__pa(swapper_pg_dir) >> PAGE_SHIFT,
+					USER_PTRS_PER_PGD,
+					KERNEL_PGD_PTRS);
+	}
 
-		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-		spin_lock_irqsave(&pgd_lock, flags);
+	/* list required to sync kernel mapping updates */
+	if (!SHARED_KERNEL_PMD)
 		pgd_list_add(pgd);
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
+
+	spin_unlock_irqrestore(&pgd_lock, flags);
 }
-#endif	/* PTRS_PER_PMD */
 
 static void pgd_dtor(void *pgd)
 {
@@ -276,9 +265,6 @@ static void pgd_dtor(void *pgd)
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
-#define UNSHARED_PTRS_PER_PGD				\
-	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
-
 #ifdef CONFIG_X86_PAE
 /*
  * Mop up any pmd pages which may still be attached to the pgd.
@@ -387,13 +373,6 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 
 void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
-	/* This is called just after the pmd has been detached from
-	   the pgd, which requires a full tlb flush to be recognized
-	   by the CPU.  Rather than incurring multiple tlb flushes
-	   while the address space is being pulled down, make the tlb
-	   gathering machinery do a full flush when we're done. */
-	tlb->fullmm = 1;
-
 	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
 	tlb_remove_page(tlb, virt_to_page(pmd));
 }
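For readers skimming the merge: the pgtable_32.c hunk above replaces the two #ifdef-selected pgd constructors (non-PAE and PAE) with a single pgd_ctor() that tests PAGETABLE_LEVELS and SHARED_KERNEL_PMD to decide whether the kernel half of a new pgd can simply be cloned from swapper_pg_dir once, or whether the pgd must be added to pgd_list so later kernel-mapping updates can be propagated into it. The standalone sketch below models only that decision; it is an illustration compiled in userspace, not kernel code, and the constant values chosen (PAGETABLE_LEVELS = 3, SHARED_KERNEL_PMD = 0) as well as the helper name kernel_mappings_shared() are assumptions for the demo.

/* pgd_ctor_sketch.c -- standalone illustration, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in configuration values (assumptions for demonstration):
 * PAGETABLE_LEVELS is 2 for !PAE and 3 for PAE; SHARED_KERNEL_PMD
 * is 0 when each pgd needs its own kernel PMDs (e.g. under Xen).
 */
#define PAGETABLE_LEVELS  3
#define SHARED_KERNEL_PMD 0

/* Mirrors the condition in the unified pgd_ctor() above. */
static bool kernel_mappings_shared(void)
{
	return PAGETABLE_LEVELS == 2 ||
	       (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD);
}

int main(void)
{
	if (kernel_mappings_shared()) {
		/* Shared lower level: clone the kernel PGD entries
		 * from swapper_pg_dir once at construction time. */
		puts("clone kernel PGD entries from swapper_pg_dir");
	} else {
		/* Unshared kernel PMDs: the pgd must be tracked on
		 * pgd_list so later kernel-mapping changes can be
		 * synced into every pagetable. */
		puts("add pgd to pgd_list for later sync");
	}
	return 0;
}

With SHARED_KERNEL_PMD set to 0, the sketch takes the pgd_list path, matching the new "if (!SHARED_KERNEL_PMD) pgd_list_add(pgd);" in the diff; note the patch also takes pgd_list_add() under pgd_lock in all cases, where the old PAE path only locked in the unshared branch.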