-rw-r--r--	arch/arm/mm/ioremap.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bdb248c..bc7d9bd 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -314,26 +314,24 @@ void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
-	struct vm_struct **p, *tmp;
+	struct vm_struct *vm;
 
 	/*
 	 * If this is a section based mapping we need to handle it
 	 * specially as the VM subsystem does not know how to handle
-	 * such a beast. We need the lock here b/c we need to clear
-	 * all the mappings before the area can be reclaimed
-	 * by someone else.
+	 * such a beast.
 	 */
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)tmp->addr,
-						    tmp->size);
+	read_lock(&vmlist_lock);
+	for (vm = vmlist; vm; vm = vm->next) {
+		if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
+			if (vm->flags & VM_ARM_SECTION_MAPPING) {
+				unmap_area_sections((unsigned long)vm->addr,
+						    vm->size);
 			}
 			break;
 		}
 	}
-	write_unlock(&vmlist_lock);
+	read_unlock(&vmlist_lock);
 #endif
 
 	vunmap(addr);
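For reference, this is how __iounmap() reads once the hunk above is applied. It is reconstructed purely from the hunk's added and context lines; the closing brace of the function is not part of the hunk and is assumed. This is a kernel-tree excerpt, not a standalone program.

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct *vm;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast.
	 */
	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
			if (vm->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)vm->addr,
						    vm->size);
			}
			break;
		}
	}
	read_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}

The switch from write_lock() to read_lock() is consistent with the comment text the patch deletes: the loop only searches vmlist for the matching VM_IOREMAP entry and never modifies the list, so shared access suffices. The page-table teardown itself happens in unmap_area_sections(), and vunmap() removes the vm_struct from the list under its own locking.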