author | Paul Mundt <lethal@linux-sh.org> | 2010-01-19 13:34:38 +0900
---|---|---
committer | Paul Mundt <lethal@linux-sh.org> | 2010-01-19 13:34:38 +0900
commit | d57d64080ddc0ff13fcffc898b6251074a482ba1 (patch) |
tree | c38fd506a30d56de84a39285412ffc1b45cc8d33 | /arch/sh/mm
parent | af1415314a4190b8ea06e53808d392fcf91555af (diff) |
sh: Prevent 64-bit pgprot clobbering across ioremap implementations.
Presently 'flags', which is only 32 bits wide, gets passed around a lot
between the various ioremap helpers and implementations. In the X2TLB case
we use 64-bit pgprots, so the upper 32 bits (which handily include our
read/write/exec permissions) presently get chopped off.
As such, we convert everything internally to use pgprot_t directly and
simply convert over with pgprot_val() where needed. With this in place,
transparent fixmap utilization for early ioremap works as expected.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
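To make the clobbering concrete, here is a minimal user-space sketch (not part of the commit; the type widths and the bit position of the hypothetical `_PAGE_EXT_EXEC` flag are assumptions chosen to mirror an X2TLB configuration) showing how narrowing a 64-bit pgprot through a 32-bit 'flags' parameter drops exactly the bits that carry the permissions:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in types (assumptions, mirroring a 32-bit sh kernel with X2TLB
 * enabled): pgprot values are 64 bits wide, while the old 'flags'
 * parameter was a 32-bit unsigned long.
 */
typedef uint64_t pteval_t;
typedef uint32_t kernel_ulong_t;

/* Hypothetical permission bit living in the upper 32 bits of the pgprot. */
#define _PAGE_EXT_EXEC	((pteval_t)1 << 40)

/* Old-style helper: taking 'flags' as a 32-bit value silently drops the
 * upper half of the pgprot at the call boundary. */
static pteval_t remap_old(kernel_ulong_t flags)
{
	return (pteval_t)flags;
}

/* New-style helper: passing the full pgprot value through loses nothing. */
static pteval_t remap_new(pteval_t pgprot)
{
	return pgprot;
}

int main(void)
{
	pteval_t prot = _PAGE_EXT_EXEC | 0x1ffULL;

	/* The implicit narrowing here mirrors the old 'unsigned long flags'
	 * parameter: the bit at position 40 is gone on the old path. */
	printf("old: %#llx\n", (unsigned long long)remap_old(prot));
	printf("new: %#llx\n", (unsigned long long)remap_new(prot));
	return 0;
}
```

Compiled and run, the old-style path reports 0x1ff while the new-style path keeps the full 0x100000001ff, which is the same loss the ioremap helpers suffered before this change.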
Diffstat (limited to 'arch/sh/mm')
-rw-r--r-- | arch/sh/mm/ioremap.c | 9
1 file changed, 4 insertions(+), 5 deletions(-)
```diff
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a130b22..85b420d 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -35,11 +35,10 @@
  */
 void __iomem * __init_refok
 __ioremap_caller(unsigned long phys_addr, unsigned long size,
-		 unsigned long flags, void *caller)
+		 pgprot_t pgprot, void *caller)
 {
 	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
-	pgprot_t pgprot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -69,7 +68,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	 * If we can't yet use the regular approach, go the fixmap route.
 	 */
 	if (!mem_init_done)
-		return ioremap_fixed(phys_addr, size, __pgprot(flags));
+		return ioremap_fixed(phys_addr, size, pgprot);
 
 	/*
 	 * Ok, go for it..
@@ -91,8 +90,9 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	 * PMB entries are all pre-faulted.
 	 */
 	if (unlikely(phys_addr >= P1SEG)) {
-		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+		unsigned long mapped;
 
+		mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
 		if (likely(mapped)) {
 			addr += mapped;
 			phys_addr += mapped;
@@ -101,7 +101,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	}
 #endif
 
-	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 	if (likely(size))
 		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
 			vunmap((void *)orig_addr);
```
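Since the internal helpers no longer take 'flags', callers are now the ones responsible for building the full pgprot_t up front. A minimal sketch of what such a caller-side wrapper could look like (the wrapper name is hypothetical and its real location is not shown by this diff; `__pgprot()`, `pgprot_val()` and `PAGE_KERNEL_NOCACHE` are the usual kernel macros):

```c
/*
 * Hypothetical caller-side wrapper (not part of this diff), assuming the
 * usual sh ioremap context (<linux/io.h>, <asm/pgtable.h>): build the
 * complete pgprot_t once, at the outermost entry point, so that no
 * intermediate helper can truncate the 64-bit X2TLB protection bits.
 */
static inline void __iomem *
ioremap_flags_sketch(unsigned long phys_addr, unsigned long size,
		     unsigned long flags)
{
	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);

	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
```

This mirrors the `pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags)` line that the diff removes from `__ioremap_caller()`: the conversion still happens, just once and before the value is handed down.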