author      Ralf Baechle <ralf@linux-mips.org>    2013-09-25 18:21:26 +0200
committer   Ralf Baechle <ralf@linux-mips.org>    2013-10-29 21:25:24 +0100
commit      14bd8c082016cd1f67fdfd702e4cf6367869a712 (patch)
tree        b4d517e79c58fd3c665286b39ddb1801890f2cb0 /arch/mips/mm
parent      7b784c634b4147345b46251884be6be4bd45fd43 (diff)
MIPS: Loongson: Get rid of Loongson 2 #ifdefery all over arch/mips.
It was ugly.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--    arch/mips/mm/c-r4k.c      52
-rw-r--r--    arch/mips/mm/tlb-r4k.c    37
-rw-r--r--    arch/mips/mm/tlbex.c     169
3 files changed, 139 insertions, 119 deletions
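Editorial note: the shape of the change is the same in all three files, namely a compile-time CONFIG_CPU_LOONGSON2 block becomes a run-time dispatch on the probed CPU type. Below is a minimal standalone sketch of that before/after pattern; the helpers and the hard-coded probe result are stand-ins for illustration, not the kernel's real routines.

#include <stdio.h>

/* Stand-ins for the kernel's CPU-type machinery; only the shape of the
 * transformation mirrors the patch, not the real helpers. */
enum cpu_type { CPU_GENERIC, CPU_LOONGSON2 };

static enum cpu_type current_cpu_type(void)
{
    return CPU_LOONGSON2;       /* the kernel derives this from CPU probing */
}

static void blast_scache(void) { puts("flush S-cache"); }
static void blast_dcache(void) { puts("flush D-cache"); }
static void blast_icache(void) { puts("flush I-cache"); }

/* Old shape: the special case is chosen when the kernel is configured. */
static void flush_cache_all_ifdef(void)
{
#if defined(CONFIG_CPU_LOONGSON2)
    blast_scache();
    return;
#endif
    blast_dcache();
    blast_icache();
}

/* New shape: one binary, the special case is chosen at run time. */
static void flush_cache_all_switch(void)
{
    switch (current_cpu_type()) {
    case CPU_LOONGSON2:
        blast_scache();         /* inclusive S-cache covers I and D */
        break;
    default:
        blast_dcache();
        blast_icache();
        break;
    }
}

int main(void)
{
    flush_cache_all_ifdef();
    flush_cache_all_switch();
    return 0;
}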
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index bc6f96fcb529..62ffd20ea869 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -346,14 +346,8 @@ static void r4k_blast_scache_setup(void)
static inline void local_r4k___flush_cache_all(void * args)
{
-#if defined(CONFIG_CPU_LOONGSON2)
- r4k_blast_scache();
- return;
-#endif
- r4k_blast_dcache();
- r4k_blast_icache();
-
switch (current_cpu_type()) {
+ case CPU_LOONGSON2:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -361,7 +355,18 @@ static inline void local_r4k___flush_cache_all(void * args)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ /*
+ * These caches are inclusive caches, that is, if something
+ * is not cached in the S-cache, we know it also won't be
+ * in one of the primary caches.
+ */
r4k_blast_scache();
+ break;
+
+ default:
+ r4k_blast_dcache();
+ r4k_blast_icache();
+ break;
}
}
@@ -572,8 +577,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
if (end - start > icache_size)
r4k_blast_icache();
- else
- protected_blast_icache_range(start, end);
+ else {
+ switch (boot_cpu_type()) {
+ case CPU_LOONGSON2:
+ protected_loongson23_blast_icache_range(start, end);
+ break;
+
+ default:
+ protected_blast_icache_range(start, end);
+ break;
+ }
+ }
}
static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -1109,15 +1123,14 @@ static void probe_pcache(void)
case CPU_ALCHEMY:
c->icache.flags |= MIPS_CACHE_IC_F_DC;
break;
- }
-#ifdef CONFIG_CPU_LOONGSON2
- /*
- * LOONGSON2 has 4 way icache, but when using indexed cache op,
- * one op will act on all 4 ways
- */
- c->icache.ways = 1;
-#endif
+ case CPU_LOONGSON2:
+ /*
+ * LOONGSON2 has 4 way icache, but when using indexed cache op,
+ * one op will act on all 4 ways
+ */
+ c->icache.ways = 1;
+ }
printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
icache_size >> 10,
@@ -1193,7 +1206,6 @@ static int probe_scache(void)
return 1;
}
-#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1209,7 +1221,6 @@ static void __init loongson2_sc_init(void)
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
-#endif
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
@@ -1259,11 +1270,10 @@ static void setup_scache(void)
#endif
return;
-#if defined(CONFIG_CPU_LOONGSON2)
case CPU_LOONGSON2:
loongson2_sc_init();
return;
-#endif
+
case CPU_XLP:
/* don't need to worry about L2, fully coherent */
return;
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index bb3a5f643e97..da3b0b9c9eae 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -52,21 +52,26 @@ extern void build_tlb_refill_handler(void);
#endif /* CONFIG_MIPS_MT_SMTC */
-#if defined(CONFIG_CPU_LOONGSON2)
/*
* LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
* unfortunately, itlb is not totally transparent to software.
*/
-#define FLUSH_ITLB write_c0_diag(4);
-
-#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }
-
-#else
-
-#define FLUSH_ITLB
-#define FLUSH_ITLB_VM(vma)
+static inline void flush_itlb(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_LOONGSON2:
+ write_c0_diag(4);
+ break;
+ default:
+ break;
+ }
+}
-#endif
+static inline void flush_itlb_vm(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_EXEC)
+ flush_itlb();
+}
void local_flush_tlb_all(void)
{
@@ -93,7 +98,7 @@ void local_flush_tlb_all(void)
}
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
- FLUSH_ITLB;
+ flush_itlb();
EXIT_CRITICAL(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
@@ -155,7 +160,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
} else {
drop_mmu_context(mm, cpu);
}
- FLUSH_ITLB;
+ flush_itlb();
EXIT_CRITICAL(flags);
}
}
@@ -197,7 +202,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
} else {
local_flush_tlb_all();
}
- FLUSH_ITLB;
+ flush_itlb();
EXIT_CRITICAL(flags);
}
@@ -230,7 +235,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
finish:
write_c0_entryhi(oldpid);
- FLUSH_ITLB_VM(vma);
+ flush_itlb_vm(vma);
EXIT_CRITICAL(flags);
}
}
@@ -262,7 +267,7 @@ void local_flush_tlb_one(unsigned long page)
tlbw_use_hazard();
}
write_c0_entryhi(oldpid);
- FLUSH_ITLB;
+ flush_itlb();
EXIT_CRITICAL(flags);
}
@@ -335,7 +340,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
tlb_write_indexed();
}
tlbw_use_hazard();
- FLUSH_ITLB_VM(vma);
+ flush_itlb_vm(vma);
EXIT_CRITICAL(flags);
}
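Editorial note: beyond dropping the #ifdef, the tlb-r4k.c hunk replaces function-like macros (FLUSH_ITLB, FLUSH_ITLB_VM) with static inline functions, which keeps type checking on the vma argument while still compiling to nothing where the switch arm is empty. A self-contained sketch of that conversion, with simplified stand-ins for the kernel types and the CP0 write:

#include <stdio.h>

/* Simplified stand-ins; VM_EXEC, the vma layout and write_c0_diag() are
 * placeholders for the kernel definitions. */
#define VM_EXEC 0x00000004UL

struct vm_area_struct { unsigned long vm_flags; };

enum cpu_type { CPU_GENERIC, CPU_LOONGSON2 };
static enum cpu_type current_cpu_type(void) { return CPU_LOONGSON2; }

static void write_c0_diag(int val) { printf("write_c0_diag(%d)\n", val); }

/* The inline replaces a macro that expanded to nothing on non-Loongson
 * builds; here the empty default arm plays that role instead. */
static inline void flush_itlb(void)
{
    switch (current_cpu_type()) {
    case CPU_LOONGSON2:
        write_c0_diag(4);       /* flush Loongson 2's 4-entry ITLB */
        break;
    default:
        break;                  /* ITLB is transparent elsewhere */
    }
}

static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
    if (vma->vm_flags & VM_EXEC)        /* only executable mappings matter */
        flush_itlb();
}

int main(void)
{
    struct vm_area_struct text = { .vm_flags = VM_EXEC };

    flush_itlb_vm(&text);
    return 0;
}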
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index fffa7fe319a0..183f2b583e4d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1311,95 +1311,100 @@ static void build_r4000_tlb_refill_handler(void)
* need three, with the second nop'ed and the third being
* unused.
*/
- /* Loongson2 ebase is different than r4k, we have more space */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
- if ((p - tlb_handler) > 64)
- panic("TLB refill handler space exceeded");
-#else
- if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
- || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
- && uasm_insn_has_bdelay(relocs,
- tlb_handler + MIPS64_REFILL_INSNS - 3)))
- panic("TLB refill handler space exceeded");
-#endif
-
- /*
- * Now fold the handler in the TLB refill handler space.
- */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
- f = final_handler;
- /* Simplest case, just copy the handler. */
- uasm_copy_handler(relocs, labels, tlb_handler, p, f);
- final_len = p - tlb_handler;
-#else /* CONFIG_64BIT */
- f = final_handler + MIPS64_REFILL_INSNS;
- if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
- /* Just copy the handler. */
- uasm_copy_handler(relocs, labels, tlb_handler, p, f);
- final_len = p - tlb_handler;
- } else {
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- const enum label_id ls = label_tlb_huge_update;
-#else
- const enum label_id ls = label_vmalloc;
-#endif
- u32 *split;
- int ov = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
- ;
- BUG_ON(i == ARRAY_SIZE(labels));
- split = labels[i].addr;
-
- /*
- * See if we have overflown one way or the other.
- */
- if (split > tlb_handler + MIPS64_REFILL_INSNS ||
- split < p - MIPS64_REFILL_INSNS)
- ov = 1;
-
- if (ov) {
+ switch (boot_cpu_type()) {
+ default:
+ if (sizeof(long) == 4) {
+ case CPU_LOONGSON2:
+ /* Loongson2 ebase is different than r4k, we have more space */
+ if ((p - tlb_handler) > 64)
+ panic("TLB refill handler space exceeded");
/*
- * Split two instructions before the end. One
- * for the branch and one for the instruction
- * in the delay slot.
+ * Now fold the handler in the TLB refill handler space.
*/
- split = tlb_handler + MIPS64_REFILL_INSNS - 2;
-
+ f = final_handler;
+ /* Simplest case, just copy the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+ final_len = p - tlb_handler;
+ break;
+ } else {
+ if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
+ || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
+ && uasm_insn_has_bdelay(relocs,
+ tlb_handler + MIPS64_REFILL_INSNS - 3)))
+ panic("TLB refill handler space exceeded");
/*
- * If the branch would fall in a delay slot,
- * we must back up an additional instruction
- * so that it is no longer in a delay slot.
+ * Now fold the handler in the TLB refill handler space.
*/
- if (uasm_insn_has_bdelay(relocs, split - 1))
- split--;
- }
- /* Copy first part of the handler. */
- uasm_copy_handler(relocs, labels, tlb_handler, split, f);
- f += split - tlb_handler;
-
- if (ov) {
- /* Insert branch. */
- uasm_l_split(&l, final_handler);
- uasm_il_b(&f, &r, label_split);
- if (uasm_insn_has_bdelay(relocs, split))
- uasm_i_nop(&f);
- else {
- uasm_copy_handler(relocs, labels,
- split, split + 1, f);
- uasm_move_labels(labels, f, f + 1, -1);
- f++;
- split++;
+ f = final_handler + MIPS64_REFILL_INSNS;
+ if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
+ /* Just copy the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+ final_len = p - tlb_handler;
+ } else {
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ const enum label_id ls = label_tlb_huge_update;
+#else
+ const enum label_id ls = label_vmalloc;
+#endif
+ u32 *split;
+ int ov = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
+ ;
+ BUG_ON(i == ARRAY_SIZE(labels));
+ split = labels[i].addr;
+
+ /*
+ * See if we have overflown one way or the other.
+ */
+ if (split > tlb_handler + MIPS64_REFILL_INSNS ||
+ split < p - MIPS64_REFILL_INSNS)
+ ov = 1;
+
+ if (ov) {
+ /*
+ * Split two instructions before the end. One
+ * for the branch and one for the instruction
+ * in the delay slot.
+ */
+ split = tlb_handler + MIPS64_REFILL_INSNS - 2;
+
+ /*
+ * If the branch would fall in a delay slot,
+ * we must back up an additional instruction
+ * so that it is no longer in a delay slot.
+ */
+ if (uasm_insn_has_bdelay(relocs, split - 1))
+ split--;
+ }
+ /* Copy first part of the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, split, f);
+ f += split - tlb_handler;
+
+ if (ov) {
+ /* Insert branch. */
+ uasm_l_split(&l, final_handler);
+ uasm_il_b(&f, &r, label_split);
+ if (uasm_insn_has_bdelay(relocs, split))
+ uasm_i_nop(&f);
+ else {
+ uasm_copy_handler(relocs, labels,
+ split, split + 1, f);
+ uasm_move_labels(labels, f, f + 1, -1);
+ f++;
+ split++;
+ }
+ }
+
+ /* Copy the rest of the handler. */
+ uasm_copy_handler(relocs, labels, split, p, final_handler);
+ final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
+ (p - split);
}
}
-
- /* Copy the rest of the handler. */
- uasm_copy_handler(relocs, labels, split, p, final_handler);
- final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
- (p - split);
+ break;
}
-#endif /* CONFIG_64BIT */
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB refill handler (%u instructions).\n",
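Editorial note: the tlbex.c rewrite leans on a less common corner of C: a case label may sit inside a block nested within the switch body, so CPU_LOONGSON2 jumps straight into the "small handler" path that 32-bit kernels take, while 64-bit kernels reach the larger path through default:. A minimal standalone illustration of that construct; the function name and slot counts are invented for the example:

#include <stdio.h>

enum cpu { CPU_OTHER, CPU_LOONGSON2 };

/*
 * A case label is only a jump target, so it may legally appear inside a
 * block nested in the switch body.  CPU_LOONGSON2 enters the small-handler
 * branch directly, bypassing the sizeof() test; every other CPU goes
 * through default: and picks a path based on the word size it was built for.
 */
static int refill_handler_slots(enum cpu c)
{
    switch (c) {
    default:
        if (sizeof(long) == 4) {
    case CPU_LOONGSON2:         /* jumps straight here for Loongson 2 */
            return 64;          /* compact, fixed-size refill slot */
        } else {
            return 2 * 64;      /* roomier slot for 64-bit kernels */
        }
    }
    return 0;                   /* not reached; keeps -Wreturn-type quiet */
}

int main(void)
{
    printf("loongson2: %d\n", refill_handler_slots(CPU_LOONGSON2));
    printf("other:     %d\n", refill_handler_slots(CPU_OTHER));
    return 0;
}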