[SPARC64]: Add infrastructure for dynamic TSB sizing.
This also cleans up tsb_context_switch(). The assembler routine is now __tsb_context_switch() and the former is an inline function that picks out the bits from the mm_struct and passes it into the assembler code as arguments. setup_tsb_params() computes the locked TLB entry to map the TSB. Later when we support using the physical address quad load instructions of Cheetah+ and later, we'll simply use the physical address for the TSB register value and set the map virtual and PTE both to zero. Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:

committed by
David S. Miller

parent
09f94287f7
commit
98c5584cfc
@@ -90,9 +90,20 @@
|
||||
|
||||
#ifndef __ASSEMBLY__

/*
 * Each TSB (Translation Storage Buffer) entry is a 16-byte tag/PTE
 * pair.  The 16-byte alignment keeps each pair inside one naturally
 * aligned unit so that, per the commit description, it can later be
 * fetched with the physical-address quad-load instructions available
 * on Cheetah+ and later CPUs.
 */
#define TSB_ENTRY_ALIGNMENT	16

struct tsb {
	unsigned long tag;	/* virtual-address tag matched on TSB lookup */
	unsigned long pte;	/* translation entry for the tagged address */
} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));

/*
 * Per-mm MMU context.  The tsb_* fields are the infrastructure for
 * dynamic TSB sizing: the TSB base/size plus the precomputed values
 * that tsb_context_switch() hands to __tsb_context_switch() to
 * install the TSB (TSB register value, and the locked TLB entry
 * mapping it).  When a physical TSB address can be used directly,
 * tsb_map_vaddr and tsb_map_pte are both set to zero.
 */
typedef struct {
	unsigned long sparc64_ctx_val;	/* hardware context register value for this mm */
	unsigned long *sparc64_tsb;	/* NOTE(review): looks like the raw TSB base used
					 * by older code paths -- confirm against callers */
	struct tsb *tsb;		/* base of this mm's TSB */
	unsigned long tsb_nentries;	/* number of struct tsb entries in *tsb */
	unsigned long tsb_reg_val;	/* value loaded into the TSB register */
	unsigned long tsb_map_vaddr;	/* virtual address of the locked TLB mapping of the TSB */
	unsigned long tsb_map_pte;	/* PTE for that locked TLB mapping */
} mm_context_t;

#endif /* !__ASSEMBLY__ */
|
||||
|
Reference in New Issue
Block a user