[SPARC64]: Add infrastructure for dynamic TSB sizing.

This also cleans up tsb_context_switch().  The assembler
routine is now __tsb_context_switch(), and the former is
an inline function that picks the relevant fields out of
the mm_struct and passes them to the assembler code as
arguments.
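
As a rough sketch of the resulting split (the exact argument
list is an assumption for illustration; the field names match
the mm_context_t changes below):

	/* Illustrative prototype; the argument list is an
	 * assumption about what the assembler side needs.
	 */
	extern void __tsb_context_switch(unsigned long pgd_pa,
					 unsigned long tsb_reg,
					 unsigned long tsb_vaddr,
					 unsigned long tsb_pte);

	static inline void tsb_context_switch(struct mm_struct *mm)
	{
		__tsb_context_switch(__pa(mm->pgd),
				     mm->context.tsb_reg_val,
				     mm->context.tsb_map_vaddr,
				     mm->context.tsb_map_pte);
	}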

setup_tsb_params() computes the locked TLB entry used to map
the TSB.  Later, when we support the physical address quad
load instructions of Cheetah+ and later chips, we'll simply
use the TSB's physical address as the TSB register value and
set both the map virtual address and the map PTE to zero.
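
A simplified sketch of that computation (TSBMAP_BASE, the TTE
bit names, and the size encoding in the register's low bits are
assumptions for illustration; the real routine also has to pick
the smallest page size that covers the whole TSB):

	static void setup_tsb_params(struct mm_struct *mm,
				     unsigned long tsb_bytes)
	{
		/* Assumed TTE bits: valid, locked, cacheable,
		 * privileged, writable.
		 */
		unsigned long tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
				     _PAGE_CV | _PAGE_P | _PAGE_W);
		unsigned long tsb_paddr = __pa(mm->context.tsb);
		unsigned long size_code = 0, sz = 8192;

		mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

		/* Assumed size encoding: 0 for an 8K TSB, +1 for
		 * each doubling.
		 */
		while (sz < tsb_bytes) {
			sz <<= 1;
			size_code++;
		}

		/* Map the TSB at a fixed virtual address with the
		 * locked TTE computed above.
		 */
		mm->context.tsb_map_vaddr = TSBMAP_BASE;
		mm->context.tsb_map_pte = tte | (tsb_paddr & PAGE_MASK);
		mm->context.tsb_reg_val = TSBMAP_BASE | size_code;
	}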

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 36384cf..2effeba 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -90,9 +90,20 @@
 
 #ifndef __ASSEMBLY__
 
+#define TSB_ENTRY_ALIGNMENT	16
+
+struct tsb {
+	unsigned long tag;
+	unsigned long pte;
+} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
+
 typedef struct {
 	unsigned long	sparc64_ctx_val;
-	unsigned long	*sparc64_tsb;
+	struct tsb	*tsb;
+	unsigned long	tsb_nentries;
+	unsigned long	tsb_reg_val;
+	unsigned long	tsb_map_vaddr;
+	unsigned long	tsb_map_pte;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
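
Not part of this patch, but to show how the new fields fit
together: a TSB lookup hashes the faulting virtual address down
to an entry index, which is why tsb_nentries stays a power of
two.  A hypothetical helper, for illustration only:

	static inline struct tsb *tsb_entry(mm_context_t *ctx,
					    unsigned long vaddr)
	{
		/* Hypothetical: index by the VA's page number,
		 * masked to the (power-of-two) table size.
		 */
		return &ctx->tsb[(vaddr >> PAGE_SHIFT) &
				 (ctx->tsb_nentries - 1UL)];
	}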