[SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB.
The trick is that every TSB load/store is registered in a
special instruction patch section. The default instructions
use virtual addresses; at boot, on chips that support it,
each registered instruction is overwritten with its
physical-address variant.
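Roughly, each accessor macro emits the virtual-address
instruction and records both its location and the
physical-address replacement in the patch section, along
these lines (a sketch only; the macro name, ASI names, and
section name are assumptions, not quoted from asm/tsb.h):

	#define TSB_LOAD_QUAD(TSB, REG)                  \
	661:	ldda	[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
		.section .tsb_phys_patch, "ax";          \
		.word	661b;                            \
		ldda	[TSB] ASI_QUAD_LDD_PHYS, REG;    \
		.previous;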
We can't do this on all chips because only cheetah+ and
later have the physical-address variant of the atomic quad
load.
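Each patch-section entry pairs the address of an instruction
with its replacement encoding. A minimal sketch of the layout,
inferred from the tsb_phys_patch() loop below (the real
definitions would live in asm/tsb.h, which this patch starts
including, and in the linker script):

	struct tsb_phys_patch_entry {
		unsigned int	addr;	/* insn location to rewrite */
		unsigned int	insn;	/* physical-address replacement */
	};

	/* Section boundaries, provided by the linker script. */
	extern struct tsb_phys_patch_entry __tsb_phys_patch,
					   __tsb_phys_patch_end;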
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 2c21d85..4893f3e 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -39,6 +39,7 @@
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
+#include <asm/tsb.h>
extern void device_scan(void);
@@ -244,6 +245,17 @@
: "g1", "g7");
}
+static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
+{
+ unsigned long tsb_addr = (unsigned long) ent;
+
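+	/* Only cheetah+ can access the TSB via its physical address. */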
+ if (tlb_type == cheetah_plus)
+ tsb_addr = __pa(tsb_addr);
+
+ __tsb_insert(tsb_addr, tag, pte);
+}
+
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
struct mm_struct *mm;
@@ -1040,6 +1052,28 @@
return ~0UL;
}
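+/* Rewrite each registered TSB access with its physical-address
+ * variant and flush the patched instruction from the I-cache.
+ */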
+static void __init tsb_phys_patch(void)
+{
+ struct tsb_phys_patch_entry *p;
+
+ p = &__tsb_phys_patch;
+ while (p < &__tsb_phys_patch_end) {
+ unsigned long addr = p->addr;
+
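+		/* Patch the instruction, then flush the old copy from the I-cache. */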
+ *(unsigned int *) addr = p->insn;
+ wmb();
+ __asm__ __volatile__("flush %0"
+ : /* no outputs */
+ : "r" (addr));
+
+ p++;
+ }
+}
+
/* paging_init() sets up the page tables */
extern void cheetah_ecache_flush_init(void);
@@ -1052,6 +1086,10 @@
unsigned long end_pfn, pages_avail, shift;
unsigned long real_end, i;
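+	/* Apply the TSB instruction patches before the TSB is used. */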
+ if (tlb_type == cheetah_plus)
+ tsb_phys_patch();
+
/* Find available physical memory... */
read_obp_memory("available", &pavail[0], &pavail_ents);