author | Joonsoo Kim <js1304@gmail.com> | 2013-02-09 06:28:06 +0100
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2013-02-16 17:54:22 +0000
commit | 101eeda38c0ab8a4f916176e325d9e036d981a24
tree | 874b1f0002c9153e6af28b1ec3dd6aa0e7737086 /arch/arm/mm/ioremap.c
parent | ed8fd2186a4e4f3b98434093b56f9b793d48443e
ARM: 7646/1: mm: use static_vm for managing static mapped areas
A static mapped area is ARM-specific, so it is better not to use the
generic vmalloc data structures, that is, vmlist and vmlist_lock,
to manage it. Using them also incurs needless overhead, which is
worth reducing.
Now we have the newly introduced static_vm infrastructure.
With it, we no longer need to iterate over all mapped areas; instead, we
iterate only over the static mapped areas. This reduces the overhead of
finding a matching area. It also removes the architecture dependency on
the vmalloc layer, which helps the maintainability of that layer.
Reviewed-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Rob Herring <rob.herring@calxeda.com>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
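For illustration, here is a minimal user-space model of the lookup that the patched code performs via find_static_vm_paddr(). This is a sketch only, not the kernel implementation: the simplified struct static_vm, the fixed table standing in for the kernel's boot-time list of static mappings, and the example addresses are all assumptions made for the demo.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* Simplified stand-in for the kernel's struct static_vm. */
struct static_vm {
	phys_addr_t phys_addr;	/* physical base of the mapping */
	uintptr_t vaddr;	/* virtual base of the mapping */
	size_t size;		/* mapping size in bytes */
	unsigned int mtype;	/* memory type the mapping was made with */
};

/* Hypothetical boot-time table standing in for the static_vm list. */
static struct static_vm static_vms[] = {
	{ 0x40000000, 0xf8000000, 0x00100000, 0 },
	{ 0x50000000, 0xf8100000, 0x00200000, 0 },
};

/* Return the static mapping fully covering [paddr, paddr + size), if any. */
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
					      size_t size, unsigned int mtype)
{
	for (size_t i = 0; i < sizeof(static_vms) / sizeof(static_vms[0]); i++) {
		struct static_vm *svm = &static_vms[i];

		if (svm->mtype != mtype)
			continue;
		/* The requested range must lie wholly inside the mapping. */
		if (svm->phys_addr > paddr ||
		    paddr + size - 1 > svm->phys_addr + svm->size - 1)
			continue;
		return svm;
	}
	return NULL;
}

int main(void)
{
	phys_addr_t paddr = 0x50001000;
	struct static_vm *svm = find_static_vm_paddr(paddr, 0x1000, 0);

	if (svm) {
		/* Same translation as the patched __arm_ioremap_pfn_caller(). */
		uintptr_t addr = svm->vaddr + (paddr - svm->phys_addr);
		printf("reuse static mapping: va %#lx\n", (unsigned long)addr);
	}
	return 0;
}
```

The point of the data structure change is visible here: the loop walks only the (few) static mappings, rather than every vmalloc area in the system as the old vmlist walk did.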
Diffstat (limited to 'arch/arm/mm/ioremap.c')
-rw-r--r-- | arch/arm/mm/ioremap.c | 71
1 file changed, 29 insertions, 42 deletions
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 904c15e86063..04d9006eab1f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -261,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	const struct mem_type *type;
 	int err;
 	unsigned long addr;
-	struct vm_struct * area;
+	struct vm_struct *area;
+	phys_addr_t paddr = __pfn_to_phys(pfn);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
-	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
 		return NULL;
 #endif
 
@@ -283,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Try to reuse one of the static mapping whenever possible.
 	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area; area = area->next) {
-		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
-			break;
-		if (!(area->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
-			continue;
-		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
-			continue;
-		/* we can drop the lock here as we know *area is static */
-		read_unlock(&vmlist_lock);
-		addr = (unsigned long)area->addr;
-		addr += __pfn_to_phys(pfn) - area->phys_addr;
-		return (void __iomem *) (offset + addr);
+	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+		struct static_vm *svm;
+
+		svm = find_static_vm_paddr(paddr, size, mtype);
+		if (svm) {
+			addr = (unsigned long)svm->vm.addr;
+			addr += paddr - svm->vm.phys_addr;
+			return (void __iomem *) (offset + addr);
+		}
 	}
-	read_unlock(&vmlist_lock);
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -312,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(pfn);
+	area->phys_addr = paddr;
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
-	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, type);
-	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+		err = ioremap_page_range(addr, addr + size, paddr,
 					 __pgprot(type->prot_pte));
 
 	if (err) {
@@ -410,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	/* If this is a static mapping, we must leave it alone */
+	svm = find_static_vm_vaddr(addr);
+	if (svm)
+		return;
 
-	read_lock(&vmlist_lock);
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (vm->addr > addr)
-			break;
-		if (!(vm->flags & VM_IOREMAP))
-			continue;
-		/* If this is a static mapping we must leave it alone */
-		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
-		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
-			read_unlock(&vmlist_lock);
-			return;
-		}
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+	{
+		struct vm_struct *vm;
+
+		vm = find_vm_area(addr);
+
 		/*
 		 * If this is a section based mapping we need to handle it
 		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
-		if ((vm->addr == addr) &&
-		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
 			unmap_area_sections((unsigned long)vm->addr, vm->size);
-			break;
-		}
-#endif
 	}
-	read_unlock(&vmlist_lock);
+#endif
 
 	vunmap(addr);
 }
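The locking change is the main point of the __iounmap() hunk: the old code had to hold vmlist_lock while walking every vmalloc area just to decide whether the address belonged to a static mapping. Since static mappings are created during boot and never torn down (as the old "we can drop the lock here" comment already relied on), the new find_static_vm_vaddr() lookup can run without a lock, and the remaining section-mapping cleanup uses the generic find_vm_area() helper instead of an open-coded vmlist walk.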