author    Ard Biesheuvel <ard.biesheuvel@linaro.org>  2014-10-20 15:42:07 +0200
committer Ard Biesheuvel <ard.biesheuvel@linaro.org>  2015-01-12 08:16:52 +0000
commit    8ce837cee8f51fb0eacb32c85461ea2f0fafc9f8 (patch)
tree      14bb68cb91e5890156381969a1f1f9ca87cd2047 /arch/arm64/mm/mmu.c
parent    e1e1fddae74b72d0415965821ad00fe39aac6f13 (diff)
arm64/mm: add create_pgd_mapping() to create private page tables
For UEFI, we need to install the memory mappings used for Runtime Services
in a dedicated set of page tables. Add create_pgd_mapping(), which allows
us to allocate and install those page table entries early.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Leif Lindholm <leif.lindholm@linaro.org>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
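A minimal sketch of how a caller might use the new helper, for example the UEFI
runtime services setup that this patch prepares for. The efi_mm instance, the
map_runtime_region() wrapper and the device/normal attribute choice are
illustrative assumptions and not part of this patch; only create_pgd_mapping()
and the arm64 protection constants appear in the diff below.

/*
 * Illustrative sketch only: install one physical region into a private
 * set of page tables via the new helper. The efi_mm instance is assumed
 * to have been allocated with its own pgd elsewhere; create_pgd_mapping()
 * would normally be declared in an arm64 header rather than locally.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm_types.h>
#include <asm/pgtable.h>

extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot);

extern struct mm_struct efi_mm;		/* private mm, assumed to exist */

static void __init map_runtime_region(phys_addr_t phys, unsigned long virt,
				      phys_addr_t size, bool device)
{
	/* Device regions get nGnRE attributes; normal memory stays executable. */
	pgprot_t prot = device ? __pgprot(PROT_DEVICE_nGnRE)
			       : PAGE_KERNEL_EXEC;

	/* Install the mapping into the private page tables. */
	create_pgd_mapping(&efi_mm, phys, virt, size, prot);
}

Passing a pgprot_t instead of the old map_io flag is what lets a single helper
serve both device and normal-memory mappings, as the hunks below show.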
Diffstat (limited to 'arch/arm64/mm/mmu.c')
-rw-r--r--  arch/arm64/mm/mmu.c | 43
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 7d5dfe2..3f3d5aa 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -158,20 +158,10 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
- phys_addr_t phys, int map_io)
+ phys_addr_t phys, pgprot_t prot)
{
pmd_t *pmd;
unsigned long next;
- pmdval_t prot_sect;
- pgprot_t prot_pte;
-
- if (map_io) {
- prot_sect = PROT_SECT_DEVICE_nGnRE;
- prot_pte = __pgprot(PROT_DEVICE_nGnRE);
- } else {
- prot_sect = PROT_SECT_NORMAL_EXEC;
- prot_pte = PAGE_KERNEL_EXEC;
- }
/*
* Check for initial section mappings in the pgd/pud and remove them.
@@ -187,7 +177,8 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0) {
pmd_t old_pmd = *pmd;
- set_pmd(pmd, __pmd(phys | prot_sect));
+ set_pmd(pmd, __pmd(phys |
+ pgprot_val(mk_sect_prot(prot))));
/*
* Check for previous table entries created during
* boot (__create_page_tables) and flush them.
@@ -196,7 +187,7 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
flush_tlb_all();
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
- prot_pte);
+ prot);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
@@ -204,7 +195,7 @@ static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
- phys_addr_t phys, int map_io)
+ phys_addr_t phys, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
@@ -222,10 +213,11 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
/*
* For 4K granule only, attempt to put down a 1GB block
*/
- if (!map_io && (PAGE_SHIFT == 12) &&
+ if ((PAGE_SHIFT == 12) &&
((addr | next | phys) & ~PUD_MASK) == 0) {
pud_t old_pud = *pud;
- set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+ set_pud(pud, __pud(phys |
+ pgprot_val(mk_sect_prot(prot))));
/*
* If we have an old value for a pud, it will
@@ -240,7 +232,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
flush_tlb_all();
}
} else {
- alloc_init_pmd(mm, pud, addr, next, phys, map_io);
+ alloc_init_pmd(mm, pud, addr, next, phys, prot);
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
@@ -252,7 +244,7 @@ static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
*/
static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
phys_addr_t phys, unsigned long virt,
- phys_addr_t size, int map_io)
+ phys_addr_t size, pgprot_t prot)
{
unsigned long addr, length, end, next;
@@ -262,7 +254,7 @@ static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
end = addr + length;
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(mm, pgd, addr, next, phys, map_io);
+ alloc_init_pud(mm, pgd, addr, next, phys, prot);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
@@ -276,7 +268,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
return;
}
__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
- size, 0);
+ size, PAGE_KERNEL_EXEC);
}
void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
@@ -286,7 +278,16 @@ void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
return;
}
__create_mapping(&init_mm, &idmap_pg_dir[pgd_index(addr)],
- addr, addr, size, map_io);
+ addr, addr, size,
+ map_io ? __pgprot(PROT_DEVICE_nGnRE)
+ : PAGE_KERNEL_EXEC);
+}
+
+void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot)
+{
+ __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot);
}
static void __init map_mem(void)