path: root/arch/arm/mm/ioremap.c
author     Nicolas Pitre <nicolas.pitre@linaro.org>   2011-09-16 01:14:23 -0400
committer  Nicolas Pitre <nico@fluxnic.net>           2011-11-26 19:21:28 -0500
commit     576d2f2525612ecb5af029a76f21f22a3b82563d (patch)
tree       81f9564c432ceeb4068dd3a5de204134a32c98f3 /arch/arm/mm/ioremap.c
parent     6ee723a6570a897208b76ab3e9a495e9106b2f8c (diff)
download   op-kernel-dev-576d2f2525612ecb5af029a76f21f22a3b82563d.zip
           op-kernel-dev-576d2f2525612ecb5af029a76f21f22a3b82563d.tar.gz
ARM: add generic ioremap optimization by reusing static mappings
Now that we have all the static mappings from iotable_init() located in the vmalloc area, it is trivial to optimize ioremap by reusing those static mappings when the requested physical area fits in one of them, and so in a generic way for all platforms.

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Stephen Warren <swarren@nvidia.com>
Tested-by: Kevin Hilman <khilman@ti.com>
Tested-by: Jamie Iles <jamie@jamieiles.com>
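To see where the reused mappings come from, here is a minimal, hypothetical board-file sketch. It is not part of this commit: the FOO_PERIPH_* addresses, the sizes and the function names are invented for illustration. It registers a static mapping through iotable_init(); with this patch, a later ioremap() of a range inside that window with the same memory type (plain ioremap() on ARM uses MT_DEVICE) returns a pointer into the existing mapping instead of building a new one, and iounmap() leaves the static mapping untouched.

/* Hypothetical example, not from this commit: FOO_PERIPH_* values and
 * the foo_* function names are made up for illustration only. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/memory.h>
#include <asm/mach/map.h>

#define FOO_PERIPH_PHYS   0xd0000000UL   /* assumed SoC peripheral base */
#define FOO_PERIPH_VIRT   0xfe000000UL   /* assumed fixed virtual address */
#define FOO_PERIPH_SIZE   0x00100000UL   /* 1 MiB window */

static struct map_desc foo_io_desc[] __initdata = {
        {
                .virtual = FOO_PERIPH_VIRT,
                .pfn     = __phys_to_pfn(FOO_PERIPH_PHYS),
                .length  = FOO_PERIPH_SIZE,
                .type    = MT_DEVICE,
        },
};

static void __init foo_map_io(void)
{
        /* Static mapping; it now lives in the vmalloc area (see parent commit). */
        iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
}

static void __init foo_driver_example(void)
{
        /*
         * The request lies inside the static mapping and uses the default
         * MT_DEVICE type, so with this patch __arm_ioremap_pfn_caller()
         * returns a pointer into the FOO_PERIPH_VIRT window instead of
         * allocating a new VM area and fresh page tables.
         */
        void __iomem *regs = ioremap(FOO_PERIPH_PHYS + 0x4000, 0x1000);

        if (regs)
                iounmap(regs);   /* leaves the static mapping in place */
}

Before this patch, the same ioremap() call would always have allocated a new VM area and set up new page tables, even when a static mapping already covered the requested physical range.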
Diffstat (limited to 'arch/arm/mm/ioremap.c')
-rw-r--r--  arch/arm/mm/ioremap.c  |  72
1 file changed, 48 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bc7d9bd..12c7ad2 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,12 +36,6 @@
#include <asm/mach/map.h>
#include "mm.h"

-/*
- * Used by ioremap() and iounmap() code to mark (super)section-mapped
- * I/O regions in vm_struct->flags field.
- */
-#define VM_ARM_SECTION_MAPPING 0x80000000
-
int ioremap_page(unsigned long virt, unsigned long phys,
                  const struct mem_type *mtype)
{
@@ -201,12 +195,6 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                return NULL;

-       /*
-        * Don't allow RAM to be mapped - this causes problems with ARMv6+
-        */
-       if (WARN_ON(pfn_valid(pfn)))
-               return NULL;
-
        type = get_mem_type(mtype);
        if (!type)
                return NULL;
@@ -216,6 +204,34 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
         */
        size = PAGE_ALIGN(offset + size);

+       /*
+        * Try to reuse one of the static mapping whenever possible.
+        */
+       read_lock(&vmlist_lock);
+       for (area = vmlist; area; area = area->next) {
+               if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
+                       break;
+               if (!(area->flags & VM_ARM_STATIC_MAPPING))
+                       continue;
+               if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+                       continue;
+               if (__phys_to_pfn(area->phys_addr) > pfn ||
+                   __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
+                       continue;
+               /* we can drop the lock here as we know *area is static */
+               read_unlock(&vmlist_lock);
+               addr = (unsigned long)area->addr;
+               addr += __pfn_to_phys(pfn) - area->phys_addr;
+               return (void __iomem *) (offset + addr);
+       }
+       read_unlock(&vmlist_lock);
+
+       /*
+        * Don't allow RAM to be mapped - this causes problems with ARMv6+
+        */
+       if (WARN_ON(pfn_valid(pfn)))
+               return NULL;
+
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
@@ -313,26 +329,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-#ifndef CONFIG_SMP
        struct vm_struct *vm;

-       /*
-        * If this is a section based mapping we need to handle it
-        * specially as the VM subsystem does not know how to handle
-        * such a beast.
-        */
        read_lock(&vmlist_lock);
        for (vm = vmlist; vm; vm = vm->next) {
-               if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
-                       if (vm->flags & VM_ARM_SECTION_MAPPING) {
-                               unmap_area_sections((unsigned long)vm->addr,
-                                                   vm->size);
-                       }
+               if (vm->addr > addr)
                        break;
+               if (!(vm->flags & VM_IOREMAP))
+                       continue;
+               /* If this is a static mapping we must leave it alone */
+               if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
+                   (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
+                       read_unlock(&vmlist_lock);
+                       return;
                }
+#ifndef CONFIG_SMP
+               /*
+                * If this is a section based mapping we need to handle it
+                * specially as the VM subsystem does not know how to handle
+                * such a beast.
+                */
+               if ((vm->addr == addr) &&
+                   (vm->flags & VM_ARM_SECTION_MAPPING)) {
+                       unmap_area_sections((unsigned long)vm->addr, vm->size);
+                       break;
+               }
+#endif
        }
        read_unlock(&vmlist_lock);
-#endif

        vunmap(addr);
}
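For readers tracing the new lookup: the containment test in the loop above requires the requested [phys, phys + size) window to lie entirely within the static mapping's [phys_addr, phys_addr + size) range, with the size-1 form keeping the comparison from wrapping when a region ends at the top of the address space. A small standalone sketch of the same check follows; the example addresses are invented, and the start comparison is done here on byte addresses rather than page frame numbers for simplicity.

/* Standalone illustration of the reuse containment check; example values
 * are made up, not taken from the commit. */
#include <stdio.h>
#include <stdint.h>

static int fits_in_static_mapping(uint32_t area_phys, uint32_t area_size,
                                  uint32_t req_phys, uint32_t req_size)
{
        /* Skip the area unless it starts at or below the request and its
         * last byte covers the request's last byte. */
        if (area_phys > req_phys)
                return 0;
        if (req_phys + req_size - 1 > area_phys + area_size - 1)
                return 0;
        return 1;
}

int main(void)
{
        uint32_t area_phys = 0xd0000000, area_size = 0x00100000; /* 1 MiB static mapping */

        printf("%d\n", fits_in_static_mapping(area_phys, area_size, 0xd0004000, 0x1000)); /* 1: fits */
        printf("%d\n", fits_in_static_mapping(area_phys, area_size, 0xd00ff000, 0x2000)); /* 0: runs past the end */
        return 0;
}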