author:    Christoph Hellwig <hch@lst.de>  2019-08-07 19:01:38 +0300
committer: Christoph Hellwig <hch@lst.de>  2019-11-11 17:19:49 +0100
commit:    a1fd79ad0d906b736228684f5040a637de86d2b2 (patch)
tree:      63f2126e84d7172e82ee332f56fe278be920fb9c
parent:    5ace77e0b41af6b9a3a8cd189a79270e8840fe0a (diff)
parisc: remove __ioremap
__ioremap is always called with the _PAGE_NO_CACHE flag, so fold the whole
thing and rename it to ioremap.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Helge Deller <deller@gmx.de>
-rw-r--r--  arch/parisc/include/asm/io.h | 11
-rw-r--r--  arch/parisc/mm/ioremap.c     | 10
2 files changed, 5 insertions, 16 deletions
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 93d37010b375..46212b52c23e 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -127,16 +127,7 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr)
 /*
  * The standard PCI ioremap interfaces
  */
-
-extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-/* Most machines react poorly to I/O-space being cacheable... Instead let's
- * define ioremap() in terms of ioremap_nocache().
- */
-static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
-{
-	return __ioremap(offset, size, _PAGE_NO_CACHE);
-}
+void __iomem *ioremap(unsigned long offset, unsigned long size);
 #define ioremap_nocache(off, sz)	ioremap((off), (sz))
 #define ioremap_wc			ioremap_nocache
 #define ioremap_uc			ioremap_nocache
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index f29f682352f0..6e7c005aa09b 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -25,7 +25,7 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 {
 	void __iomem *addr;
 	struct vm_struct *area;
@@ -36,10 +36,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	unsigned long end = phys_addr + size - 1;
 	/* Support EISA addresses */
 	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
-	    (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
+	    (phys_addr >= 0x00500000 && end < 0x03bfffff))
 		phys_addr |= F_EXTEND(0xfc000000);
-		flags |= _PAGE_NO_CACHE;
-	}
 #endif

 	/* Don't allow wraparound or zero size */
@@ -65,7 +63,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	}

 	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
-			  _PAGE_ACCESSED | flags);
+			  _PAGE_ACCESSED | _PAGE_NO_CACHE);

 	/*
	 * Mappings have to be page-aligned
@@ -90,7 +88,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	return (void __iomem *) (offset + (char __iomem *)addr);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap);

 void iounmap(const volatile void __iomem *io_addr)
 {
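
Usage note (not part of the patch): the rename is transparent to callers. The sketch below is a hypothetical driver fragment with made-up base address, size, and register offset, illustrating that ioremap() on parisc is now an out-of-line function that always establishes an uncached mapping, while ioremap_nocache()/ioremap_wc()/ioremap_uc() remain aliases for it.

#include <linux/io.h>
#include <linux/errno.h>

/* Hypothetical example values, not taken from the patch. */
#define EXAMPLE_MMIO_BASE	0xf4000000UL
#define EXAMPLE_MMIO_SIZE	0x1000UL
#define EXAMPLE_CTRL_REG	0x10

static int example_probe(void)
{
	void __iomem *regs;

	/* After this patch, ioremap() on parisc always maps the range with
	 * _PAGE_NO_CACHE; existing callers need no changes. */
	regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + EXAMPLE_CTRL_REG);	/* uncached MMIO write */

	iounmap(regs);
	return 0;
}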