From 765129594494994e3de91182857bc2586dbe21c9 Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Mon, 10 Oct 2011 10:50:36 +0000 Subject: powerpc: Only define HAVE_ARCH_HUGETLB_UNMAPPED_AREA if PPC_MM_SLICES If we don't have slices, we should be able to use the generic hugetlb_get_unmapped_area() code Signed-off-by: Becky Bruce Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hugetlbpage.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/mm/hugetlbpage.c') diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 5964371..6b1cf64 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -697,19 +697,17 @@ int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, return 1; } +#ifdef CONFIG_PPC_MM_SLICES unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { -#ifdef CONFIG_PPC_MM_SLICES struct hstate *hstate = hstate_file(file); int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); -#else - return get_unmapped_area(file, addr, len, pgoff, flags); -#endif } +#endif unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { -- cgit v1.1 From a1cd54198811e3c35fcaabdb94767b307f7ad1db Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Mon, 10 Oct 2011 10:50:39 +0000 Subject: powerpc: Update hugetlb huge_pte_alloc and tablewalk code for FSL BOOKE This updates the hugetlb page table code to handle 64-bit FSL_BOOKE. The previous 32-bit work counted on the inner levels of the page table collapsing. Signed-off-by: Becky Bruce Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hugetlbpage.c | 48 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 42 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/mm/hugetlbpage.c') diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 6b1cf64..96178e8 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -155,11 +155,28 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, hpdp->pd = 0; kmem_cache_free(cachep, new); } +#else + if (!hugepd_none(*hpdp)) + kmem_cache_free(cachep, new); + else + hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; #endif spin_unlock(&mm->page_table_lock); return 0; } +/* + * These macros define how to determine which level of the page table holds + * the hpdp. 
+ */ +#ifdef CONFIG_PPC_FSL_BOOK3E +#define HUGEPD_PGD_SHIFT PGDIR_SHIFT +#define HUGEPD_PUD_SHIFT PUD_SHIFT +#else +#define HUGEPD_PGD_SHIFT PUD_SHIFT +#define HUGEPD_PUD_SHIFT PMD_SHIFT +#endif + pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pg; @@ -172,12 +189,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz addr &= ~(sz-1); pg = pgd_offset(mm, addr); - if (pshift >= PUD_SHIFT) { + + if (pshift >= HUGEPD_PGD_SHIFT) { hpdp = (hugepd_t *)pg; } else { pdshift = PUD_SHIFT; pu = pud_alloc(mm, pg, addr); - if (pshift >= PMD_SHIFT) { + if (pshift >= HUGEPD_PUD_SHIFT) { hpdp = (hugepd_t *)pu; } else { pdshift = PMD_SHIFT; @@ -453,14 +471,23 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long start; start = addr; - pmd = pmd_offset(pud, addr); do { + pmd = pmd_offset(pud, addr); next = pmd_addr_end(addr, end); if (pmd_none(*pmd)) continue; +#ifdef CONFIG_PPC_FSL_BOOK3E + /* + * Increment next by the size of the huge mapping since + * there may be more than one entry at this level for a + * single hugepage, but all of them point to + * the same kmem cache that holds the hugepte. + */ + next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); +#endif free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, addr, next, floor, ceiling); - } while (pmd++, addr = next, addr != end); + } while (addr = next, addr != end); start &= PUD_MASK; if (start < floor) @@ -487,8 +514,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long start; start = addr; - pud = pud_offset(pgd, addr); do { + pud = pud_offset(pgd, addr); next = pud_addr_end(addr, end); if (!is_hugepd(pud)) { if (pud_none_or_clear_bad(pud)) @@ -496,10 +523,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling); } else { +#ifdef CONFIG_PPC_FSL_BOOK3E + /* + * Increment next by the size of the huge mapping since + * there may be more than one entry at this level for a + * single hugepage, but all of them point to + * the same kmem cache that holds the hugepte. + */ + next = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); +#endif free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, addr, next, floor, ceiling); } - } while (pud++, addr = next, addr != end); + } while (addr = next, addr != end); start &= PGDIR_MASK; if (start < floor) -- cgit v1.1 From 881fde1db591628db0494e77cd9002b0ba8b04b7 Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Mon, 10 Oct 2011 10:50:40 +0000 Subject: powerpc: hugetlb: modify include usage for FSL BookE code The original 32-bit hugetlb implementation used PPC64 vs PPC32 to determine which code path to take. However, the final hugetlb implementation for 64-bit FSL ended up shared with the FSL 32-bit code so the actual check needs to be FSL_BOOK3E vs everything else. This patch changes the include protections to reflect this. There are also a couple of related comment fixes. 
Signed-off-by: Becky Bruce Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hugetlbpage.c | 54 ++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 29 deletions(-) (limited to 'arch/powerpc/mm/hugetlbpage.c') diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 96178e8..7c7cb97 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -33,17 +33,17 @@ unsigned int HPAGE_SHIFT; * implementations may have more than one gpage size due to limitations * of the memory allocators, so we need multiple arrays */ -#ifdef CONFIG_PPC64 -#define MAX_NUMBER_GPAGES 1024 -static u64 gpage_freearray[MAX_NUMBER_GPAGES]; -static unsigned nr_gpages; -#else +#ifdef CONFIG_PPC_FSL_BOOK3E #define MAX_NUMBER_GPAGES 128 struct psize_gpages { u64 gpage_list[MAX_NUMBER_GPAGES]; unsigned int nr_gpages; }; static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT]; +#else +#define MAX_NUMBER_GPAGES 1024 +static u64 gpage_freearray[MAX_NUMBER_GPAGES]; +static unsigned nr_gpages; #endif static inline int shift_to_mmu_psize(unsigned int shift) @@ -114,12 +114,12 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, struct kmem_cache *cachep; pte_t *new; -#ifdef CONFIG_PPC64 - cachep = PGT_CACHE(pdshift - pshift); -#else +#ifdef CONFIG_PPC_FSL_BOOK3E int i; int num_hugepd = 1 << (pshift - pdshift); cachep = hugepte_cache; +#else + cachep = PGT_CACHE(pdshift - pshift); #endif new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT); @@ -131,12 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, return -ENOMEM; spin_lock(&mm->page_table_lock); -#ifdef CONFIG_PPC64 - if (!hugepd_none(*hpdp)) - kmem_cache_free(cachep, new); - else - hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; -#else +#ifdef CONFIG_PPC_FSL_BOOK3E /* * We have multiple higher-level entries that point to the same * actual pte location. Fill in each as we go and backtrack on error. @@ -215,7 +210,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz return hugepte_offset(hpdp, addr, pdshift); } -#ifdef CONFIG_PPC32 +#ifdef CONFIG_PPC_FSL_BOOK3E /* Build list of addresses of gigantic pages. This function is used in early * boot before the buddy or bootmem allocator is setup. */ @@ -335,7 +330,7 @@ void __init reserve_hugetlb_gpages(void) } } -#else /* PPC64 */ +#else /* !PPC_FSL_BOOK3E */ /* Build list of addresses of gigantic pages. This function is used in early * boot before the buddy or bootmem allocator is setup. 
@@ -373,7 +368,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) return 0; } -#ifdef CONFIG_PPC32 +#ifdef CONFIG_PPC_FSL_BOOK3E #define HUGEPD_FREELIST_SIZE \ ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) @@ -433,11 +428,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif unsigned long pdmask = ~((1UL << pdshift) - 1); unsigned int num_hugepd = 1; -#ifdef CONFIG_PPC64 - unsigned int shift = hugepd_shift(*hpdp); -#else - /* Note: On 32-bit the hpdp may be the first of several */ +#ifdef CONFIG_PPC_FSL_BOOK3E + /* Note: On fsl the hpdp may be the first of several */ num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift)); +#else + unsigned int shift = hugepd_shift(*hpdp); #endif start &= pdmask; @@ -455,10 +450,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif hpdp->pd = 0; tlb->need_flush = 1; -#ifdef CONFIG_PPC64 - pgtable_free_tlb(tlb, hugepte, pdshift - shift); -#else + +#ifdef CONFIG_PPC_FSL_BOOK3E hugepd_free(tlb, hugepte); +#else + pgtable_free_tlb(tlb, hugepte, pdshift - shift); #endif } @@ -590,12 +586,12 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, continue; hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); } else { -#ifdef CONFIG_PPC32 +#ifdef CONFIG_PPC_FSL_BOOK3E /* * Increment next by the size of the huge mapping since - * on 32-bit there may be more than one entry at the pgd - * level for a single hugepage, but all of them point to - * the same kmem cache that holds the hugepte. + * there may be more than one entry at the pgd level + * for a single hugepage, but all of them point to the + * same kmem cache that holds the hugepte. */ next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); #endif @@ -817,7 +813,7 @@ static int __init hugepage_setup_sz(char *str) } __setup("hugepagesz=", hugepage_setup_sz); -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_FSL_BOOK3E struct kmem_cache *hugepte_cache; static int __init hugetlbpage_init(void) { -- cgit v1.1 From a6146888be0aa80ea41c99178d7d2e08efc776b5 Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Mon, 10 Oct 2011 10:50:43 +0000 Subject: powerpc: Add gpages reservation code for 64-bit FSL BOOKE For 64-bit FSL_BOOKE implementations, gigantic pages need to be reserved at boot time by the memblock code based on the command line. This adds the call that handles the reservation, and fixes some code comments. It also removes the previous pr_err when reserve_hugetlb_gpages is called on a system without hugetlb enabled - the way the code is structured, the call is unconditional and the resulting error message spurious and confusing. Signed-off-by: Becky Bruce Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hugetlbpage.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/mm/hugetlbpage.c') diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 7c7cb97..79c575d 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -28,10 +28,10 @@ unsigned int HPAGE_SHIFT; /* * Tracks gpages after the device tree is scanned and before the - * huge_boot_pages list is ready. On 64-bit implementations, this is - * just used to track 16G pages and so is a single array. 32-bit - * implementations may have more than one gpage size due to limitations - * of the memory allocators, so we need multiple arrays + * huge_boot_pages list is ready. 
On 64-bit implementations, this is
- * just used to track 16G pages and so is a single array.  32-bit
- * implementations may have more than one gpage size due to limitations
- * of the memory allocators, so we need multiple arrays
+ * huge_boot_pages list is ready.  On non-Freescale implementations, this is
+ * just used to track 16G pages and so is a single array.  FSL-based
+ * implementations may have more than one gpage size, so we need multiple
+ * arrays
  */
 #ifdef CONFIG_PPC_FSL_BOOK3E
 #define MAX_NUMBER_GPAGES 128
-- cgit v1.1
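
A note on the HUGEPD_PGD_SHIFT/HUGEPD_PUD_SHIFT selection introduced by the
second patch above: the sketch below is a minimal standalone illustration,
not kernel code. The PGDIR/PUD/PMD shift values and the sample huge-page
shifts are assumptions picked only so the program compiles and runs in
userspace; only the macro selection and the >= comparisons mirror the
patched huge_pte_alloc() logic.

/*
 * Standalone illustration only; not kernel code.  The shift values are
 * made up for this example; the real ones come from the kernel pgtable
 * headers and depend on the configured page size.
 */
#include <stdio.h>

#define PGDIR_SHIFT	39	/* assumed: one pgd entry maps 512G */
#define PUD_SHIFT	30	/* assumed: one pud entry maps 1G */
#define PMD_SHIFT	21	/* assumed: one pmd entry maps 2M */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* FSL Book3E: the hugepd can sit directly at the pgd or pud level. */
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
/* Everything else: the hugepd stands in for the level below. */
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif

/* Mirrors the pshift comparisons in the patched huge_pte_alloc(). */
static const char *hugepd_level(unsigned int pshift)
{
	if (pshift >= HUGEPD_PGD_SHIFT)
		return "pgd";
	if (pshift >= HUGEPD_PUD_SHIFT)
		return "pud";
	return "pmd";
}

int main(void)
{
	/* Example huge-page sizes: 4M, 16M, 1G and 16G. */
	unsigned int shifts[] = { 22, 24, 30, 34 };
	unsigned int i;

	for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++)
		printf("huge page shift %2u -> hugepd lives at the %s level\n",
		       shifts[i], hugepd_level(shifts[i]));
	return 0;
}

Building with -DCONFIG_PPC_FSL_BOOK3E makes the same huge-page shift land one
level higher in the tree. That is also why the FSL walkers in these patches
step next by the size of the huge mapping: several consecutive entries at
that level cover a single huge page, and all of them point to the same kmem
cache that holds the hugepte.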