author    Will Deacon <will.deacon@arm.com>    2019-02-22 14:45:42 +0000
committer Will Deacon <will.deacon@arm.com>    2019-04-08 12:00:40 +0100
commit    b012980d1c6e27f5c4adf0c19defca8658956820 (patch)
tree      11d24c8be3e2aeacd423bc556ff68da03a82bf60 /arch/riscv/include/asm
parent    420af1554790a95e6813f56f63b6d2361614082b (diff)
riscv/mmiowb: Hook up mmiowb() implementation to asm-generic code
In a bid to kill off explicit mmiowb() usage in driver code, hook up the
asm-generic mmiowb() tracking code for riscv, so that an mmiowb() is
automatically issued from spin_unlock() if an I/O write was performed in
the critical section.

Reviewed-by: Palmer Dabbelt <palmer@sifive.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
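For context, the asm-generic layer this patch hooks into tracks MMIO writes with a
per-CPU pending flag plus a lock-nesting count, and issues mmiowb() from
spin_unlock() only when an I/O write actually happened inside the critical section.
Below is a minimal, standalone C model of that bookkeeping. The helper names mirror
the asm-generic ones (mmiowb_set_pending(), mmiowb_spin_lock(), mmiowb_spin_unlock()),
but the flat variables, the stubbed fence and main() are simplifications for
illustration only, not the kernel implementation:

#include <stdio.h>

/* Simplified stand-ins for the per-CPU state kept by the asm-generic code. */
static unsigned int nesting_count;   /* how many spinlocks are currently held */
static unsigned int mmiowb_pending;  /* nonzero if an MMIO write happened under a lock */

static void mmiowb(void)
{
	/* On riscv this would be: __asm__ __volatile__ ("fence o,w" : : : "memory"); */
	puts("mmiowb: fence o,w");
}

static void mmiowb_set_pending(void)  /* called from the MMIO write accessors */
{
	/* Only records a pending barrier if at least one lock is held. */
	mmiowb_pending = nesting_count;
}

static void mmiowb_spin_lock(void)    /* called on spin_lock() */
{
	nesting_count++;
}

static void mmiowb_spin_unlock(void)  /* called on spin_unlock() */
{
	if (mmiowb_pending) {
		mmiowb_pending = 0;
		mmiowb();             /* fence only when I/O was done under the lock */
	}
	nesting_count--;
}

int main(void)
{
	mmiowb_spin_lock();
	mmiowb_set_pending();         /* e.g. a writel() inside the critical section */
	mmiowb_spin_unlock();         /* barrier issued automatically */

	mmiowb_spin_lock();
	mmiowb_spin_unlock();         /* no I/O write, no barrier */
	return 0;
}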
Diffstat (limited to 'arch/riscv/include/asm')
-rw-r--r--   arch/riscv/include/asm/Kbuild    1
-rw-r--r--   arch/riscv/include/asm/io.h     15
-rw-r--r--   arch/riscv/include/asm/mmiowb.h 14
3 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 221cd2ec78a4..cccd12cf27d4 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -21,7 +21,6 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
generic-y += mutex.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 1d9c1376dc64..744fd92e77bc 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -20,6 +20,7 @@
#define _ASM_RISCV_IO_H
#include <linux/types.h>
+#include <asm/mmiowb.h>
extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
@@ -100,18 +101,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#endif
/*
- * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
- * the ordering with I/O on spinlocks like PowerPC does. The worry is that
- * drivers won't get this correct, but I also don't want to introduce a fence
- * into the lock code that otherwise only uses AMOs (and is essentially defined
- * by the ISA to be correct). For now I'm leaving this here: "o,w" is
- * sufficient to ensure that all writes to the device have completed before the
- * write to the spinlock is allowed to commit. I surmised this from reading
- * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
- */
-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
-
-/*
* Unordered I/O memory access primitives. These are even more relaxed than
* the relaxed versions, as they don't even order accesses between successive
* operations to the I/O regions.
@@ -165,7 +154,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define __io_br() do {} while (0)
#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory");
#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory");
-#define __io_aw() do {} while (0)
+#define __io_aw() mmiowb_set_pending()
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
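Since __io_aw() now expands to mmiowb_set_pending(), every ordered MMIO write
accessor records that an I/O write happened. The write-side macros are outside this
hunk; assuming they mirror the read accessors shown above (wrapping the raw store in
__io_bw()/__io_aw()), a writel() on riscv now behaves roughly as follows. This is an
illustrative expansion, not part of the patch:

/* #define writel(v, c)  ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); }) */
/* which, after this change, is effectively: */
__asm__ __volatile__ ("fence w,o" : : : "memory");  /* __io_bw(): order prior writes before the device store */
writel_cpu(v, c);                                   /* the raw store to the device register */
mmiowb_set_pending();                               /* __io_aw(): flag a pending mmiowb for spin_unlock() */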
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
new file mode 100644
index 000000000000..5d7e3a2b4e3b
--- /dev/null
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_MMIOWB_H
+#define _ASM_RISCV_MMIOWB_H
+
+/*
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
+ */
+#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_RISCV_MMIOWB_H */
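The driver-visible effect is sketched below with a hypothetical device (foo_dev, its
lock, regs and FOO_* register names are made up for illustration): the explicit
mmiowb() between the last register write and the unlock is no longer needed, because
spin_unlock() now issues the fence itself when an I/O write is pending.

/* Before this series (hypothetical driver critical section): */
spin_lock(&foo->lock);
writel(FOO_KICK, foo->regs + FOO_DOORBELL);
mmiowb();                    /* explicit barrier before releasing the lock */
spin_unlock(&foo->lock);

/* After it: */
spin_lock(&foo->lock);
writel(FOO_KICK, foo->regs + FOO_DOORBELL);
spin_unlock(&foo->lock);     /* "fence o,w" issued automatically if an I/O write is pending */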