[SPARC64]: Create a separate kernel TSB for 4MB/256MB mappings.

It can map all of the linear kernel mappings with zero TSB hash
conflicts for systems with 16GB or less RAM.  In such cases, on
SUN4V, once we load up this TSB the first time with all the
mappings, we never take a linear kernel mapping TLB miss again;
the hypervisor handles them all.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d7744a0950
parent 9cc3a1ac9a
committed by David S. Miller
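A quick check of the 16GB claim: with 4MB pages, a direct-mapped TSB covers conflict-free exactly (entry count x 4MB) of linear address space, so 16GB implies a 4096-entry table. Below is a minimal sketch of that indexing; the entry count and shift are assumptions for illustration, not values taken from this patch.

/* Sketch only: direct-mapped TSB indexing for 4MB pages.
 * TSB4M_NENTRIES is an assumed table size, not read from this diff.
 */
#include <stdio.h>

#define TSB4M_NENTRIES	4096UL	/* assumed: one entry per 4MB up to 16GB */
#define SHIFT_4MB	22	/* 4MB page = 1 << 22 bytes */

static unsigned long tsb4m_index(unsigned long vaddr)
{
	/* Drop the in-page offset, then mask down to the table size. */
	return (vaddr >> SHIFT_4MB) & (TSB4M_NENTRIES - 1);
}

int main(void)
{
	printf("%lu\n", tsb4m_index(0x0UL));		/* slot 0 */
	printf("%lu\n", tsb4m_index(4UL << 20));	/* slot 1 */
	printf("%lu\n", tsb4m_index(16UL << 30));	/* wraps to slot 0: first conflict at 16GB */
	return 0;
}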
@@ -121,6 +121,12 @@ kvmap_dtlb_obp:
 	 nop

 	.align		32
+kvmap_dtlb_tsb4m_load:
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	KTSB_WRITE(%g1, %g5, %g6)
+	ba,pt		%xcc, kvmap_dtlb_load
+	 nop
+
 kvmap_dtlb:
 	/* %g6: TAG TARGET */
 	mov		TLB_TAG_ACCESS, %g4
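For readers unfamiliar with the KTSB_LOCK_TAG/KTSB_WRITE pair added above: the entry's tag is first marked locked so a concurrent miss handler sees the entry as invalid, the data word is written, and the real tag is stored last to publish the pair. A rough C sketch of that protocol follows; the lock-bit position and entry layout are assumptions for illustration, and the kernel's macro uses a compare-and-swap loop rather than a plain store.

/* Sketch of the lock-then-write protocol behind KTSB_LOCK_TAG and
 * KTSB_WRITE; layout and lock bit are illustrative assumptions.
 */
#include <stdatomic.h>

#define TSB_TAG_LOCK	(1UL << 47)	/* assumed position of the lock bit */

struct tsb_entry {
	_Atomic unsigned long tag;
	unsigned long data;		/* the TTE/PTE */
};

static void tsb_store(struct tsb_entry *ent, unsigned long tag, unsigned long pte)
{
	/* KTSB_LOCK_TAG: mark the tag locked; a concurrent lookup now
	 * treats this entry as a miss.  (The real macro loops on a
	 * compare-and-swap; a plain store stands in here.) */
	atomic_store_explicit(&ent->tag, TSB_TAG_LOCK, memory_order_relaxed);
	/* KTSB_WRITE: data word first, real tag last; the release store
	 * publishes both and unlocks the entry in one step. */
	ent->data = pte;
	atomic_store_explicit(&ent->tag, tag, memory_order_release);
}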
@@ -133,6 +139,13 @@ kvmap_dtlb_4v:
 	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop

+	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
+	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+
+	/* TSB entry address left in %g1, lookup linear PTE.
+	 * Must preserve %g1 and %g6 (TAG).
+	 */
+kvmap_dtlb_tsb4m_miss:
 	sethi		%hi(kpte_linear_bitmap), %g2
 	or		%g2, %lo(kpte_linear_bitmap), %g2

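The kvmap_dtlb_tsb4m_miss path above falls through into the kpte_linear_bitmap lookup, which records, per 256MB region of the linear mapping, whether that region is backed by 256MB pages. A sketch of the test, with the bitmap size and helper name assumed for illustration:

/* Sketch of the per-region bitmap test; sizes are assumptions. */
#define SHIFT_256MB	28			/* 256MB = 1 << 28 bytes */
#define LINEAR_BITS	(64UL * 64)		/* assumed bitmap capacity */

static unsigned long kpte_linear_bitmap[LINEAR_BITS / 64];

static int uses_256mb_page(unsigned long linear_offset)
{
	/* One bit per 256MB region, indexed by the offset into the
	 * linear mapping; set means "map with a 256MB page". */
	unsigned long i = linear_offset >> SHIFT_256MB;
	return (kpte_linear_bitmap[i / 64] >> (i % 64)) & 1;
}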
@@ -163,7 +176,7 @@ kvmap_dtlb_4v:

 	.globl		kvmap_linear_patch
 kvmap_linear_patch:
-	ba,pt		%xcc, kvmap_dtlb_load
+	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
 	 xor		%g2, %g4, %g5

 kvmap_dtlb_vmalloc_addr:
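Putting the three hunks together: kvmap_linear_patch is a boot-time patched branch, and retargeting it from kvmap_dtlb_load to kvmap_dtlb_tsb4m_load means a linear-mapping miss that had to compute its PTE now also caches it in the new 4M TSB before loading the TLB. As pseudocode, where every helper is a stand-in for the assembler labels in this diff rather than a real kernel API:

/* Rough control flow of the miss path after this patch; all helpers
 * are illustrative stand-ins for the labels above.
 */
extern unsigned long tsb4m_lookup(unsigned long vaddr);	/* KERN_TSB4M_LOOKUP_TL1 */
extern unsigned long linear_pte(unsigned long vaddr);	/* bitmap-driven PTE build */
extern void tsb4m_insert(unsigned long vaddr, unsigned long pte); /* kvmap_dtlb_tsb4m_load */
extern void tlb_load(unsigned long pte);		/* kvmap_dtlb_load */

void kernel_dtlb_miss(unsigned long vaddr)
{
	unsigned long pte = tsb4m_lookup(vaddr);
	if (pte) {			/* TSB hit: no PTE computation */
		tlb_load(pte);
		return;
	}
	pte = linear_pte(vaddr);	/* kvmap_dtlb_tsb4m_miss */
	tsb4m_insert(vaddr, pte);	/* the newly patched branch target */
	tlb_load(pte);
}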