powerpc/mm: Drop WIMG in favour of new constants
PowerISA 3.0 introduces two pte bits with the below meaning for radix: 00 -> Normal Memory 01 -> Strong Access Order (SAO) 10 -> Non idempotent I/O (Cache inhibited and guarded) 11 -> Tolerant I/O (Cache inhibited) We drop the existing WIMG bits in the Linux page table in favour of the above constants. We lose _PAGE_WRITETHRU with this conversion. We only use writethru via pgprot_cached_wthru() which is used by fbdev/controlfb.c which is Apple control display and also PPC32. With respect to _PAGE_COHERENCE, we have been marking hpte always coherent for some time now. htab_convert_pte_flags() always added HPTE_R_M. NOTE: KVM changes need closer review. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:

committed by
Michael Ellerman

parent
72176dd0ad
commit
30bda41aba
@@ -192,12 +192,13 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
|
||||
/*
|
||||
* Add in WIG bits
|
||||
*/
|
||||
if (pteflags & _PAGE_WRITETHRU)
|
||||
rflags |= HPTE_R_W;
|
||||
if (pteflags & _PAGE_NO_CACHE)
|
||||
|
||||
if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
|
||||
rflags |= HPTE_R_I;
|
||||
if (pteflags & _PAGE_GUARDED)
|
||||
rflags |= HPTE_R_G;
|
||||
if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
|
||||
rflags |= (HPTE_R_I | HPTE_R_G);
|
||||
if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
|
||||
rflags |= (HPTE_R_I | HPTE_R_W);
|
||||
|
||||
return rflags;
|
||||
}
|
||||
@@ -1142,8 +1143,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
|
||||
/* If this PTE is non-cacheable and we have restrictions on
|
||||
* using non cacheable large pages, then we switch to 4k
|
||||
*/
|
||||
if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
|
||||
(pte_val(*ptep) & _PAGE_NO_CACHE)) {
|
||||
if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
|
||||
if (user_region) {
|
||||
demote_segment_4k(mm, ea);
|
||||
psize = MMU_PAGE_4K;
|
||||
@@ -1297,13 +1297,13 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
|
||||
|
||||
WARN_ON(hugepage_shift);
|
||||
#ifdef CONFIG_PPC_64K_PAGES
|
||||
/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
|
||||
/* If either _PAGE_4K_PFN or cache inhibited is set (and we are on
|
||||
* a 64K kernel), then we don't preload, hash_page() will take
|
||||
* care of it once we actually try to access the page.
|
||||
* That way we don't have to duplicate all of the logic for segment
|
||||
* page size demotion here
|
||||
*/
|
||||
if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
|
||||
if ((pte_val(*ptep) & _PAGE_4K_PFN) || pte_ci(*ptep))
|
||||
goto out_exit;
|
||||
#endif /* CONFIG_PPC_64K_PAGES */
|
||||
|
||||
|
Reference in New Issue
Block a user