powerpc: Free up some CPU feature bits by moving out MMU-related features
Some of the 64-bit PPC CPU features are MMU-related, so this patch moves them to MMU_FTR_ bits. All cpu_has_feature()-style tests are moved to mmu_has_feature(), and seven feature bits are freed as a result.

Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
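The helpers involved follow the usual powerpc feature-mask pattern: a feature is a single bit in a per-CPU mask, and a *_has_feature() call is just a bitwise test against that mask. The standalone sketch below mimics the pattern outside the kernel; the struct mmu_spec type, the mmu_features field name, and the bit values are illustrative assumptions for this sketch, not the kernel's actual definitions.

/* Standalone sketch of the bitmask feature-test pattern behind
 * cpu_has_feature()/mmu_has_feature().  All names and values here are
 * illustrative, not the kernel's real definitions. */
#include <stdio.h>

/* Hypothetical MMU feature bits, one per bit position. */
#define MMU_FTR_1T_SEGMENT	0x00000001ul
#define MMU_FTR_NO_SLBIE_B	0x00000002ul

struct mmu_spec {
	unsigned long mmu_features;	/* mask of MMU_FTR_* bits */
};

/* Mirrors the shape of mmu_has_feature(): a plain bitwise AND. */
static int mmu_has_feature(const struct mmu_spec *spec, unsigned long feature)
{
	return (spec->mmu_features & feature) != 0;
}

int main(void)
{
	struct mmu_spec spec = { .mmu_features = MMU_FTR_1T_SEGMENT };

	if (!mmu_has_feature(&spec, MMU_FTR_1T_SEGMENT))
		printf("not 1T segment size capable\n");
	else
		printf("1T segment size capable\n");
	return 0;
}

Because the test is just a mask lookup, moving a bit from the CPU feature mask to the MMU feature mask only requires switching the affected call sites from cpu_has_feature() to mmu_has_feature(), which is what the diff below does.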
committed by: Benjamin Herrenschmidt
parent: eca590f402
commit: 44ae3ab335
@@ -167,7 +167,7 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 	int esid_1t_count;
 
 	/* System is not 1T segment size capable. */
-	if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
+	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
 		return (GET_ESID(addr1) == GET_ESID(addr2));
 
 	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
@@ -202,7 +202,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	 */
 	hard_irq_disable();
 	offset = get_paca()->slb_cache_ptr;
-	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
+	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
 		asm volatile("isync" : : : "memory");