author		Paul Burton <paul.burton@mips.com>	2019-02-02 01:43:27 +0000
committer	Paul Burton <paul.burton@mips.com>	2019-02-04 10:56:35 -0800
commit		535113896e802e9f8f92c05a887d1761c34ae903 (patch)
tree		4d0b573a5ad008f478f806e585aec22db77e03cc /arch/mips/include/asm
parent		0b317c389c6771cbe1c5a12fe9322285a808a9bd (diff)
MIPS: Add GINVT instruction helpers
Add a family of ginvt_* functions making it easy to emit a GINVT
instruction to globally invalidate TLB entries. We make use of the
_ASM_MACRO infrastructure to support emitting the instructions even if
the assembler isn't new enough to support them natively.

An associated STYPE_GINV definition & sync_ginv() function are added to
emit a sync instruction of type 0x14, which operates as a completion
barrier for these new GINVT (and GINVI) instructions.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--	arch/mips/include/asm/barrier.h		19
-rw-r--r--	arch/mips/include/asm/ginvt.h		56
-rw-r--r--	arch/mips/include/asm/mipsregs.h	7
3 files changed, 82 insertions, 0 deletions
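
As a usage illustration (not part of the patch): the helpers added here let a
caller globally invalidate TLB entries and then wait for the invalidation to
complete on all coherent CPUs. The flush_one_page()/flush_all_tlbs() wrappers
below are invented for this sketch; ginvt_va(), ginvt_full() and sync_ginv()
are the functions the patch introduces.

#include <asm/barrier.h>	/* sync_ginv(), added below */
#include <asm/ginvt.h>		/* ginvt_va(), ginvt_full(), added below */

/* Hypothetical wrapper (not in this patch): drop the TLB entries for one
 * virtual address on all coherent CPUs, then wait until the invalidation
 * has been globalized & completed before returning. */
static void flush_one_page(unsigned long addr)
{
	ginvt_va(addr);		/* GINVT, type VA; addr is masked to its page pair */
	sync_ginv();		/* sync 0x14: completion barrier for the GINVT */
}

/* Hypothetical wrapper: invalidate the whole TLB on all coherent CPUs. */
static void flush_all_tlbs(void)
{
	ginvt_full();
	sync_ginv();
}
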
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index a5eb1bb199a7..b48ee2caf78d 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -105,6 +105,20 @@
*/
#define STYPE_SYNC_MB 0x10
+/*
+ * stype 0x14 - A completion barrier specific to global invalidations
+ *
+ * When a sync instruction of this type completes any preceding GINVI or GINVT
+ * operation has been globalized & completed on all coherent CPUs. Anything
+ * that the GINV* instruction should invalidate will have been invalidated on
+ * all coherent CPUs when this instruction completes. It is implementation
+ * specific whether the GINV* instructions themselves will ensure completion,
+ * or this sync type will.
+ *
+ * In systems implementing global invalidates (ie. with Config5.GI == 2 or 3)
+ * this sync type also requires that previous SYNCI operations have completed.
+ */
+#define STYPE_GINV 0x14
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync() \
@@ -222,6 +236,11 @@
#define __smp_mb__before_atomic() __smp_mb__before_llsc()
#define __smp_mb__after_atomic() smp_llsc_mb()
+static inline void sync_ginv(void)
+{
+ asm volatile("sync\t%0" :: "i"(STYPE_GINV));
+}
+
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/include/asm/ginvt.h b/arch/mips/include/asm/ginvt.h
new file mode 100644
index 000000000000..49c6dbe37338
--- /dev/null
+++ b/arch/mips/include/asm/ginvt.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MIPS_ASM_GINVT_H__
+#define __MIPS_ASM_GINVT_H__
+
+#include <asm/mipsregs.h>
+
+enum ginvt_type {
+ GINVT_FULL,
+ GINVT_VA,
+ GINVT_MMID,
+};
+
+#ifdef TOOLCHAIN_SUPPORTS_GINV
+# define _ASM_SET_GINV ".set ginv\n"
+#else
+_ASM_MACRO_1R1I(ginvt, rs, type,
+ _ASM_INSN_IF_MIPS(0x7c0000bd | (__rs << 21) | (\\type << 8))
+ _ASM_INSN32_IF_MM(0x0000717c | (__rs << 16) | (\\type << 9)));
+# define _ASM_SET_GINV
+#endif
+
+static inline void ginvt(unsigned long addr, enum ginvt_type type)
+{
+ asm volatile(
+ ".set push\n"
+ _ASM_SET_GINV
+ " ginvt %0, %1\n"
+ ".set pop"
+ : /* no outputs */
+ : "r"(addr), "i"(type)
+ : "memory");
+}
+
+static inline void ginvt_full(void)
+{
+ ginvt(0, GINVT_FULL);
+}
+
+static inline void ginvt_va(unsigned long addr)
+{
+ addr &= PAGE_MASK << 1;
+ ginvt(addr, GINVT_VA);
+}
+
+static inline void ginvt_mmid(void)
+{
+ ginvt(0, GINVT_MMID);
+}
+
+static inline void ginvt_va_mmid(unsigned long addr)
+{
+ addr &= PAGE_MASK << 1;
+ ginvt(addr, GINVT_VA | GINVT_MMID);
+}
+
+#endif /* __MIPS_ASM_GINVT_H__ */
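
As a quick sanity check on the fallback encoding above (illustrative
user-space snippet, not kernel code): the word emitted for the MIPS
(non-microMIPS) case can be reproduced directly from the expression in the
header; the register number and type values below are chosen arbitrarily.

#include <stdio.h>
#include <stdint.h>

/* Rebuild the MIPS GINVT encoding used by the fallback macro:
 * 0x7c0000bd | (rs << 21) | (type << 8). */
static uint32_t ginvt_word(unsigned int rs, unsigned int type)
{
	return 0x7c0000bdu | (rs << 21) | (type << 8);
}

int main(void)
{
	printf("ginvt $4, GINVT_VA   -> 0x%08x\n", ginvt_word(4, 1));	/* rs = $4, type = GINVT_VA (1) */
	printf("ginvt $0, GINVT_FULL -> 0x%08x\n", ginvt_word(0, 0));	/* type = GINVT_FULL (0) */
	return 0;
}
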
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 402b80af91aa..900a47581dd1 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1247,6 +1247,13 @@ __asm__(".macro parse_r var r\n\t"
ENC \
".endm")
+/* Instructions with 1 register operand & 1 immediate operand */
+#define _ASM_MACRO_1R1I(OP, R1, I2, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #I2 "\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ ENC \
+ ".endm")
+
/* Instructions with 2 register operands */
#define _ASM_MACRO_2R(OP, R1, R2, ENC) \
__asm__(".macro " #OP " " #R1 ", " #R2 "\n\t" \