Diffstat (limited to 'arch/mips/mm/ioremap.c')
-rw-r--r-- | arch/mips/mm/ioremap.c | 151
1 file changed, 30 insertions, 121 deletions
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 8317f337a86e..b6dad2fd5575 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -14,94 +14,13 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm_types.h>
+#include <linux/io.h>
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/tlbflush.h>
+#include <ioremap.h>
 
-static inline void remap_area_pte(pte_t * pte, unsigned long address,
-	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
-{
-	phys_addr_t end;
-	unsigned long pfn;
-	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
-				   | __WRITEABLE | flags);
-
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-	BUG_ON(address >= end);
-	pfn = phys_addr >> PAGE_SHIFT;
-	do {
-		if (!pte_none(*pte)) {
-			printk("remap_area_pte: page already exists\n");
-			BUG();
-		}
-		set_pte(pte, pfn_pte(pfn, pgprot));
-		address += PAGE_SIZE;
-		pfn++;
-		pte++;
-	} while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
-	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
-{
-	phys_addr_t end;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-	phys_addr -= address;
-	BUG_ON(address >= end);
-	do {
-		pte_t * pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
-}
-
-static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
-	phys_addr_t size, unsigned long flags)
-{
-	int error;
-	pgd_t * dir;
-	unsigned long end = address + size;
-
-	phys_addr -= address;
-	dir = pgd_offset(&init_mm, address);
-	flush_cache_all();
-	BUG_ON(address >= end);
-	do {
-		p4d_t *p4d;
-		pud_t *pud;
-		pmd_t *pmd;
-
-		error = -ENOMEM;
-		p4d = p4d_alloc(&init_mm, dir, address);
-		if (!p4d)
-			break;
-		pud = pud_alloc(&init_mm, p4d, address);
-		if (!pud)
-			break;
-		pmd = pmd_alloc(&init_mm, pud, address);
-		if (!pmd)
-			break;
-		if (remap_area_pmd(pmd, address, end - address,
-				   phys_addr + address, flags))
-			break;
-		error = 0;
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_all();
-	return error;
-}
+#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
+#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
 
 static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
 			       void *arg)
@@ -118,27 +37,25 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
 }
 
 /*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
+ * ioremap_prot     -   map bus memory into CPU space
+ * @phys_addr:    bus address of the memory
+ * @size:      size of the resource to map
  *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
+ * ioremap_prot gives the caller control over cache coherency attributes (CCA)
  */
-
-#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
-
-void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
+		unsigned long prot_val)
 {
+	unsigned long flags = prot_val & _CACHE_MASK;
 	unsigned long offset, pfn, last_pfn;
-	struct vm_struct * area;
+	struct vm_struct *area;
 	phys_addr_t last_addr;
-	void * addr;
+	unsigned long vaddr;
+	void __iomem *cpu_addr;
+
+	cpu_addr = plat_ioremap(phys_addr, size, flags);
+	if (cpu_addr)
+		return cpu_addr;
 
 	phys_addr = fixup_bigphys_addr(phys_addr, size);
 
@@ -181,30 +98,22 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
-	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
-		vunmap(addr);
+	vaddr = (unsigned long)area->addr;
+
+	flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
+	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+			__pgprot(flags))) {
+		free_vm_area(area);
 		return NULL;
 	}
 
-	return (void __iomem *) (offset + (char *)addr);
+	return (void __iomem *)(vaddr + offset);
 }
+EXPORT_SYMBOL(ioremap_prot);
 
-#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
-
-void __iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
 {
-	struct vm_struct *p;
-
-	if (IS_KSEG1(addr))
-		return;
-
-	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
-	if (!p)
-		printk(KERN_ERR "iounmap: bad address %p\n", addr);
-
-	kfree(p);
+	if (!plat_iounmap(addr) && !IS_KSEG1(addr))
+		vunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
-
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
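The rewritten functions defer to plat_ioremap()/plat_iounmap() before taking the generic path, which is why the patch pulls in <ioremap.h>: each platform supplies that header, and most only need no-op stubs so that ioremap_prot() falls through to the KSEG1 shortcut or ioremap_page_range(), and iounmap() to vunmap(). A minimal sketch of such stubs, modelled on the mach-generic variant of the header (treat the exact path and wording here as assumptions, not part of this diff):

/*
 * Sketch of default no-op hooks as a platform's <ioremap.h> might
 * provide them (cf. arch/mips/include/asm/mach-generic/ioremap.h).
 */
static inline void __iomem *plat_ioremap(phys_addr_t offset,
					 unsigned long size,
					 unsigned long flags)
{
	/* NULL = no platform-special mapping; ioremap_prot() then
	 * tries the KSEG1 window or ioremap_page_range(). */
	return NULL;
}

static inline int plat_iounmap(const volatile void __iomem *addr)
{
	/* 0 = not handled here; iounmap() goes on to test
	 * IS_KSEG1() and vunmap() the mapping itself. */
	return 0;
}

With the CCA exposed as the prot_val parameter, the familiar wrappers reduce to thin macros: ioremap(offset, size) can simply expand to ioremap_prot((offset), (size), _CACHE_UNCACHED), with a cacheable variant passing the platform's default CCA instead (the exact wrapper definitions live in asm/io.h and are quoted here from memory, not from this diff).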