author    David Gibson <dwg@au1.ibm.com>    2008-09-05 11:49:54 +1000
committer Paul Mackerras <paulus@samba.org> 2008-09-15 11:08:47 -0700
commit    0b26425ce101a7c1b1ad4ec353d4e860223d9fc4 (patch)
tree      ea179429b60efda0e4ce60ca41ad974f921c5fbb
parent    150c6c8fecf6daaf68c2987ba2b6b259baefdff2 (diff)
powerpc: Clean up hugepage pagetable allocation for powerpc with 16G pages
There is a small bug in the handling of 16G hugepages recently added to the kernel. This doesn't cause a crash or other user-visible problems, but it does mean that more levels of pagetable are allocated than make sense for 16G pages. The hugepage pagetables for the 16G pages are allocated much lower in the pagetable tree than they should be, with the intervening levels allocated as full pmd and pud pages which will only ever have one entry filled in.

This patch corrects the problem and, at the same time, cleans up the handling of which level the 64k and 16M hugepage pagetables are allocated at. The new formulation of the tests should be more robust against changes in pagetable structure, or any newly added hugepage sizes.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--    arch/powerpc/mm/hugetlbpage.c    59
1 file changed, 33 insertions(+), 26 deletions(-)
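To illustrate the new test formulation before reading the diff, here is a minimal, self-contained userspace sketch of the level-selection logic. The shift values below are illustrative assumptions for a generic 4-level, 4K-base-page layout, not the real ppc64 constants, and hugepd_level() is a hypothetical helper that simply mirrors the comparisons made by the patch's hpud_offset()/hpmd_offset() helpers:

    #include <stdio.h>

    /* ASSUMED shift values for a generic 4-level, 4K-base-page layout;
     * the real ppc64 constants differ and depend on the base page size. */
    #define PMD_SHIFT 21    /* one pmd entry maps 2^21 bytes */
    #define PUD_SHIFT 30    /* one pud entry maps 2^30 bytes */

    /* Hypothetical helper mirroring the patch's tests: decide at which
     * level of the pagetable tree the hugepage directory hangs, based
     * only on the hugepage size (expressed as log2 of its byte size). */
    static const char *hugepd_level(unsigned int huge_page_shift)
    {
        if (huge_page_shift < PMD_SHIFT)
            return "below the pmd (walk pgd -> pud -> pmd)";
        if (huge_page_shift < PUD_SHIFT)
            return "at the pud entry (walk pgd -> pud, then stop)";
        return "at the pgd entry itself (no pud or pmd allocated)";
    }

    int main(void)
    {
        /* 64K, 16M and 16G hugepages, as log2 of their size in bytes */
        unsigned int shifts[] = { 16, 24, 34 };

        for (int i = 0; i < 3; i++)
            printf("2^%u-byte hugepage: directory %s\n",
                   shifts[i], hugepd_level(shifts[i]));
        return 0;
    }

With these assumed values, a 16G page (shift 34) is at or above PUD_SHIFT, so the hugepage directory takes the place of the pud and pmd levels entirely; allocating those intervening levels anyway is exactly the over-allocation the old code performed.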
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f1c2d55..a117024 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -128,29 +128,37 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
return 0;
}
-/* Base page size affects how we walk hugetlb page tables */
-#ifdef CONFIG_PPC_64K_PAGES
-#define hpmd_offset(pud, addr, h) pmd_offset(pud, addr)
-#define hpmd_alloc(mm, pud, addr, h) pmd_alloc(mm, pud, addr)
-#else
-static inline
-pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
+
+static pud_t *hpud_offset(pgd_t *pgd, unsigned long addr, struct hstate *hstate)
+{
+ if (huge_page_shift(hstate) < PUD_SHIFT)
+ return pud_offset(pgd, addr);
+ else
+ return (pud_t *) pgd;
+}
+static pud_t *hpud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr,
+ struct hstate *hstate)
{
- if (huge_page_shift(hstate) == PAGE_SHIFT_64K)
+ if (huge_page_shift(hstate) < PUD_SHIFT)
+ return pud_alloc(mm, pgd, addr);
+ else
+ return (pud_t *) pgd;
+}
+static pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
+{
+ if (huge_page_shift(hstate) < PMD_SHIFT)
return pmd_offset(pud, addr);
else
return (pmd_t *) pud;
}
-static inline
-pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
- struct hstate *hstate)
+static pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
+ struct hstate *hstate)
{
- if (huge_page_shift(hstate) == PAGE_SHIFT_64K)
+ if (huge_page_shift(hstate) < PMD_SHIFT)
return pmd_alloc(mm, pud, addr);
else
return (pmd_t *) pud;
}
-#endif
/* Build list of addresses of gigantic pages. This function is used in early
* boot before the buddy or bootmem allocator is setup.
@@ -204,7 +212,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
pg = pgd_offset(mm, addr);
if (!pgd_none(*pg)) {
- pu = pud_offset(pg, addr);
+ pu = hpud_offset(pg, addr, hstate);
if (!pud_none(*pu)) {
pm = hpmd_offset(pu, addr, hstate);
if (!pmd_none(*pm))
@@ -233,7 +241,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
addr &= hstate->mask;
pg = pgd_offset(mm, addr);
- pu = pud_alloc(mm, pg, addr);
+ pu = hpud_alloc(mm, pg, addr, hstate);
if (pu) {
pm = hpmd_alloc(mm, pu, addr, hstate);
@@ -316,13 +324,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
-#ifdef CONFIG_PPC_64K_PAGES
- if (pud_none_or_clear_bad(pud))
- continue;
- hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling,
- psize);
-#else
- if (shift == PAGE_SHIFT_64K) {
+ if (shift < PMD_SHIFT) {
if (pud_none_or_clear_bad(pud))
continue;
hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
@@ -332,7 +334,6 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
continue;
free_hugepte_range(tlb, (hugepd_t *)pud, psize);
}
-#endif
} while (pud++, addr = next, addr != end);
start &= PGDIR_MASK;
@@ -422,9 +423,15 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
psize = get_slice_psize(tlb->mm, addr);
BUG_ON(!mmu_huge_psizes[psize]);
next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
- continue;
- hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+ if (mmu_psize_to_shift(psize) < PUD_SHIFT) {
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+ } else {
+ if (pgd_none(*pgd))
+ continue;
+ free_hugepte_range(tlb, (hugepd_t *)pgd, psize);
+ }
} while (pgd++, addr = next, addr != end);
}