MIPS: Allow platform-specific scratch registers
XLR/XLP COP0 scratch is register 22, sel 0-7. Add a function
c0_kscratch() that returns the scratch register for the platform,
and use its return value when generating the TLB handlers.

Set kscratch_mask to 0xf for XLR/XLP, since the config4 register
does not exist on these processors. This allows the kernel to
allocate scratch registers 0-3 if needed.
Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5445/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
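
The kscratch_mask setup mentioned above is not part of the tlbex.c
diff below; it touches the CPU probe code instead. A minimal sketch
of that side, assuming the Netlogic branch of
arch/mips/kernel/cpu-probe.c sets the field directly (the function
name and surrounding code are shown for illustration only and are not
a hunk of this patch):

    static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
    {
            /* ... decode PRId and set c->cputype to CPU_XLR/CPU_XLP ... */

            /*
             * XLR/XLP have no Config4 register to advertise KScratch,
             * so report COP0 register 22, selects 0-3, by hand.
             */
            c->kscratch_mask = 0xf;
    }

With kscratch_mask set to 0xf, allocate_kscratch() in tlbex.c can hand
out selects 0-3 on these CPUs, just as it does on cores that report a
KScrExist mask in Config4.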
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index afeef93..c052df8 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -305,6 +305,17 @@
static unsigned int kscratch_used_mask __cpuinitdata;
+static inline int __maybe_unused c0_kscratch(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_XLP:
+ case CPU_XLR:
+ return 22;
+ default:
+ return 31;
+ }
+}
+
static int __cpuinit allocate_kscratch(void)
{
int r;
@@ -336,7 +347,7 @@
if (scratch_reg > 0) {
/* Save in CPU local C0_KScratch? */
- UASM_i_MTC0(p, 1, 31, scratch_reg);
+ UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
r.r1 = K0;
r.r2 = K1;
r.r3 = 1;
@@ -385,7 +396,7 @@
static void __cpuinit build_restore_work_registers(u32 **p)
{
if (scratch_reg > 0) {
- UASM_i_MFC0(p, 1, 31, scratch_reg);
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
return;
}
/* K0 already points to save area, restore $1 and $2 */
@@ -674,7 +685,7 @@
uasm_il_b(p, r, lid);
}
if (scratch_reg > 0)
- UASM_i_MFC0(p, 1, 31, scratch_reg);
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
else
UASM_i_LW(p, 1, scratchpad_offset(0), 0);
} else {
@@ -817,7 +828,7 @@
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
if (pgd_reg != -1) {
/* pgd is in pgd_reg */
- UASM_i_MFC0(p, ptr, 31, pgd_reg);
+ UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
} else {
/*
* &pgd << 11 stored in CONTEXT [23..63].
@@ -930,7 +941,7 @@
if (mode == refill_scratch) {
if (scratch_reg > 0)
- UASM_i_MFC0(p, 1, 31, scratch_reg);
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
else
UASM_i_LW(p, 1, scratchpad_offset(0), 0);
} else {
@@ -1096,7 +1107,7 @@
static struct mips_huge_tlb_info __cpuinit
build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp,
- unsigned int ptr, int c0_scratch)
+ unsigned int ptr, int c0_scratch_reg)
{
struct mips_huge_tlb_info rv;
unsigned int even, odd;
@@ -1110,12 +1121,12 @@
UASM_i_MFC0(p, tmp, C0_BADVADDR);
if (pgd_reg != -1)
- UASM_i_MFC0(p, ptr, 31, pgd_reg);
+ UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
else
UASM_i_MFC0(p, ptr, C0_CONTEXT);
- if (c0_scratch >= 0)
- UASM_i_MTC0(p, scratch, 31, c0_scratch);
+ if (c0_scratch_reg >= 0)
+ UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
else
UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
@@ -1130,14 +1141,14 @@
}
} else {
if (pgd_reg != -1)
- UASM_i_MFC0(p, ptr, 31, pgd_reg);
+ UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
else
UASM_i_MFC0(p, ptr, C0_CONTEXT);
UASM_i_MFC0(p, tmp, C0_BADVADDR);
- if (c0_scratch >= 0)
- UASM_i_MTC0(p, scratch, 31, c0_scratch);
+ if (c0_scratch_reg >= 0)
+ UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
else
UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
@@ -1242,8 +1253,8 @@
}
UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
- if (c0_scratch >= 0) {
- UASM_i_MFC0(p, scratch, 31, c0_scratch);
+ if (c0_scratch_reg >= 0) {
+ UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
build_tlb_write_entry(p, l, r, tlb_random);
uasm_l_leave(l, *p);
rv.restore_scratch = 1;
@@ -1490,7 +1501,7 @@
} else {
/* PGD in c0_KScratch */
uasm_i_jr(&p, 31);
- UASM_i_MTC0(&p, a0, 31, pgd_reg);
+ UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
}
if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
panic("tlbmiss_handler_setup_pgd_array space exceeded");