author		Niklas Schnelle <schnelle@linux.ibm.com>	2020-02-28 10:27:22 +0100
committer	Vasily Gorbik <gor@linux.ibm.com>		2020-05-20 10:22:52 +0200
commit		a999eb96fdd4da488ac3085c40e20d61de26f6af (patch)
tree		8953876566c8a5830aa519d0b9ad412a67ebf887 /arch/s390
parent		bc4b295e87a86bf14333753daeb1c84909197c46 (diff)
s390/pci: ioremap() align with generic code
Let's use the same signature and parameter names as in the generic ioremap() definition, making the physical address' type explicit. Add a check against address wrap around as in the generic lib/ioremap.c:ioremap_prot() code. Finally use free_vm_area() instead of vunmap() as in the generic code. Besides being clearer, free_vm_area() can also skip a few additional checks compared with vunmap().

Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
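[Editor's illustration, not part of this commit: a minimal sketch of how a driver might call the re-typed ioremap()/iounmap() pair after this change. The function name zpci_example_map() and the choice of BAR 0 are hypothetical.]

#include <linux/io.h>
#include <linux/pci.h>

static int zpci_example_map(struct pci_dev *pdev)
{
	phys_addr_t bar_start = pci_resource_start(pdev, 0);	/* hypothetical BAR */
	size_t bar_len = pci_resource_len(pdev, 0);
	void __iomem *regs;
	u32 val;

	/* With this patch the arguments are phys_addr_t and size_t,
	 * matching the generic ioremap() prototype. */
	regs = ioremap(bar_start, bar_len);
	if (!regs)
		return -ENOMEM;

	val = ioread32(regs);	/* MMIO access through the __iomem cookie */
	pci_info(pdev, "example: first register reads 0x%x\n", val);

	iounmap(regs);
	return 0;
}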
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/io.h	2
-rw-r--r--	arch/s390/pci/pci.c		21
2 files changed, 12 insertions, 11 deletions
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 5a16f500515a..da014e4f8113 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -26,7 +26,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 
 #define IO_SPACE_LIMIT 0
 
-void __iomem *ioremap(unsigned long offset, unsigned long size);
+void __iomem *ioremap(phys_addr_t addr, size_t size);
 void iounmap(volatile void __iomem *addr);
 
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 3f6670613c57..3902c9f6f2d6 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -226,28 +226,29 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
 	zpci_memcpy_toio(to, from, count);
 }
 
-void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
+void __iomem *ioremap(phys_addr_t addr, size_t size)
 {
+	unsigned long offset, vaddr;
 	struct vm_struct *area;
-	unsigned long offset;
+	phys_addr_t last_addr;
 
-	if (!size)
+	last_addr = addr + size - 1;
+	if (!size || last_addr < addr)
 		return NULL;
 
 	if (!static_branch_unlikely(&have_mio))
-		return (void __iomem *) ioaddr;
+		return (void __iomem *) addr;
 
-	offset = ioaddr & ~PAGE_MASK;
-	ioaddr &= PAGE_MASK;
+	offset = addr & ~PAGE_MASK;
+	addr &= PAGE_MASK;
 	size = PAGE_ALIGN(size + offset);
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
 
-	if (ioremap_page_range((unsigned long) area->addr,
-			       (unsigned long) area->addr + size,
-			       ioaddr, PAGE_KERNEL)) {
-		vunmap(area->addr);
+	vaddr = (unsigned long) area->addr;
+	if (ioremap_page_range(vaddr, vaddr + size, addr, PAGE_KERNEL)) {
+		free_vm_area(area);
 		return NULL;
 	}
 	return (void __iomem *) ((unsigned long) area->addr + offset);
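
[Editor's illustration with assumed values, not part of the patch: a standalone sketch of why the new wrap-around check rejects mappings whose last byte would overflow phys_addr_t, mirroring the check in lib/ioremap.c:ioremap_prot().]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0xfffffffffffff000ULL;	/* hypothetical physical address */
	uint64_t size = 0x2000;			/* mapping would run past 2^64 */
	uint64_t last_addr = addr + size - 1;	/* unsigned wrap: 0xfff */

	if (!size || last_addr < addr)
		printf("rejected: last_addr 0x%llx < addr 0x%llx\n",
		       (unsigned long long)last_addr, (unsigned long long)addr);
	return 0;
}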