powerpc/mm: use pte helpers in generic code
Get rid of platform-specific _PAGE_XXXX in powerpc common code and use
helpers instead.

mm/dump_linuxpagetables.c will be handled separately.

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
committed by
Michael Ellerman

parent
34eb138ed7
commit
26973fa5ac
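
The helpers this commit switches to (pte_exec(), pte_mkexec(), pte_exprotect() and friends) are small static inline functions that each powerpc platform defines against its own page-table bits, so generic code never has to name a _PAGE_XXX bit directly. Below is a minimal, self-contained sketch of that pattern; the types, bit values and helper bodies are illustrative stand-ins, not the real definitions from arch/powerpc/include/asm/.

    /* Hedged sketch of the accessor/mutator helper pattern.
     * Bit positions and bodies are illustrative, not powerpc's. */
    #include <stdio.h>
    #include <stdbool.h>

    typedef unsigned long pte_basic_t;
    typedef struct { pte_basic_t pte; } pte_t;

    #define _PAGE_PRESENT 0x001UL /* made-up bit values */
    #define _PAGE_USER    0x002UL
    #define _PAGE_EXEC    0x004UL

    static inline pte_basic_t pte_val(pte_t pte) { return pte.pte; }
    static inline pte_t __pte(pte_basic_t val) { return (pte_t){ val }; }

    /* Accessors: test a property without exposing the platform bit. */
    static inline bool pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
    static inline bool pte_exec(pte_t pte)    { return pte_val(pte) & _PAGE_EXEC; }
    static inline bool pte_user(pte_t pte)    { return pte_val(pte) & _PAGE_USER; }

    /* Mutators: return a modified copy; they never touch the page table. */
    static inline pte_t pte_mkexec(pte_t pte)    { return __pte(pte_val(pte) | _PAGE_EXEC); }
    static inline pte_t pte_exprotect(pte_t pte) { return __pte(pte_val(pte) & ~_PAGE_EXEC); }

    int main(void)
    {
            pte_t pte = __pte(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXEC);

            pte = pte_exprotect(pte);                 /* strip exec permission */
            printf("exec after exprotect: %d\n", pte_exec(pte)); /* prints 0 */
            return 0;
    }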
@@ -44,20 +44,13 @@ static inline int is_exec_fault(void)
 static inline int pte_looks_normal(pte_t pte)
 {
 
-#if defined(CONFIG_PPC_BOOK3S_64)
-	if ((pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL)) == _PAGE_PRESENT) {
+	if (pte_present(pte) && !pte_special(pte)) {
 		if (pte_ci(pte))
 			return 0;
 		if (pte_user(pte))
 			return 1;
 	}
 	return 0;
-#else
-	return (pte_val(pte) &
-		(_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER |
-		 _PAGE_PRIVILEGED)) ==
-		(_PAGE_PRESENT | _PAGE_USER);
-#endif
 }
 
 static struct page *maybe_pte_to_page(pte_t pte)
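
The hunk above also shows why the CONFIG_PPC_BOOK3S_64 #ifdef can go: both arms computed the same predicate against different bit sets, and the helper form expresses it once. The tiny check below, a hedged sketch with stand-in bit values rather than the real powerpc ones, exhaustively verifies that the open-coded mask test and the helper-style test agree:

    /* Hedged sketch: the two forms of the "present and not special"
     * test agree for every combination of the two bits. */
    #include <assert.h>
    #include <stdbool.h>

    #define _PAGE_PRESENT 0x1UL /* illustrative */
    #define _PAGE_SPECIAL 0x2UL

    static bool old_form(unsigned long v)
    {
            return (v & (_PAGE_PRESENT | _PAGE_SPECIAL)) == _PAGE_PRESENT;
    }

    static bool new_form(unsigned long v)
    {
            bool present = v & _PAGE_PRESENT; /* models pte_present() */
            bool special = v & _PAGE_SPECIAL; /* models pte_special() */
            return present && !special;
    }

    int main(void)
    {
            for (unsigned long v = 0; v < 4; v++)
                    assert(old_form(v) == new_form(v));
            return 0;
    }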
@@ -117,7 +110,7 @@ static pte_t set_pte_filter(pte_t pte)
 	struct page *pg;
 
 	/* No exec permission in the first place, move on */
-	if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
+	if (!pte_exec(pte) || !pte_looks_normal(pte))
 		return pte;
 
 	/* If you set _PAGE_EXEC on weird pages you're on your own */
@@ -137,7 +130,7 @@ static pte_t set_pte_filter(pte_t pte)
 	}
 
 	/* Else, we filter out _PAGE_EXEC */
-	return __pte(pte_val(pte) & ~_PAGE_EXEC);
+	return pte_exprotect(pte);
 }
 
 static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
@@ -150,7 +143,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
 	 * we just bail out
 	 */
-	if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault())
+	if (dirty || pte_exec(pte) || !is_exec_fault())
 		return pte;
 
 #ifdef CONFIG_DEBUG_VM
@@ -176,7 +169,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 	set_bit(PG_arch_1, &pg->flags);
 
 bail:
-	return __pte(pte_val(pte) | _PAGE_EXEC);
+	return pte_mkexec(pte);
 }
 
 #endif /* CONFIG_PPC_BOOK3S */
@@ -191,10 +184,10 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 	 * Make sure hardware valid bit is not set. We don't do
 	 * tlb flush for this update.
 	 */
-	VM_WARN_ON(pte_val(*ptep) & _PAGE_PRESENT);
+	VM_WARN_ON(pte_hw_valid(*ptep));
 
 	/* Add the pte bit when trying to set a pte */
-	pte = __pte(pte_val(pte) | _PAGE_PTE);
+	pte = pte_mkpte(pte);
 
 	/* Note: mm->context.id might not yet have been assigned as
 	 * this context might not have been activated yet when this
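
A note on the helper chosen above: the warning uses pte_hw_valid() rather than pte_present(). On Book3S/64, as I understand it, a PTE can be present from software's point of view while invalid to the hardware (tracked via a separate invalid bit), and this check only cares about the hardware valid bit. A hedged sketch of that distinction, with illustrative bit positions rather than the real Book3S/64 values:

    /* Hedged sketch: software-present vs hardware-valid split. */
    #include <assert.h>

    #define _PAGE_PRESENT 0x1UL /* hardware valid bit (illustrative) */
    #define _PAGE_INVALID 0x2UL /* present to software, invalid to hw */

    static int pte_hw_valid_sketch(unsigned long v)
    {
            return !!(v & _PAGE_PRESENT);
    }

    static int pte_present_sketch(unsigned long v)
    {
            return !!(v & (_PAGE_PRESENT | _PAGE_INVALID));
    }

    int main(void)
    {
            unsigned long v = _PAGE_INVALID; /* e.g. mid protection change */

            assert(pte_present_sketch(v));   /* software still sees it */
            assert(!pte_hw_valid_sketch(v)); /* hardware does not */
            return 0;
    }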
@@ -112,15 +112,17 @@ EXPORT_SYMBOL(ioremap_coherent);
 void __iomem *
 ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
+	pte_t pte = __pte(flags);
 
 	/* writeable implies dirty for kernel addresses */
-	if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
-		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
+	if (pte_write(pte))
+		pte = pte_mkdirty(pte);
 
 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
-	flags &= ~(_PAGE_USER | _PAGE_EXEC);
-	flags |= _PAGE_PRIVILEGED;
+	pte = pte_exprotect(pte);
+	pte = pte_mkprivileged(pte);
 
-	return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
+	return __ioremap_caller(addr, size, pte_pgprot(pte), __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_prot);
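
The shape of the converted ioremap_prot() is worth calling out: the raw flags are lifted into a typed pte_t once with __pte(), every fixup then goes through helpers, and pte_pgprot() converts back only at the call boundary. A hedged, self-contained model of that round trip follows; the types are simplified and the bit values made up:

    /* Hedged sketch of the flags -> pte_t -> pgprot_t round trip. */
    #include <stdio.h>

    typedef unsigned long pte_basic_t;
    typedef struct { pte_basic_t pte; } pte_t;
    typedef struct { pte_basic_t pgprot; } pgprot_t;

    #define _PAGE_RW    0x1UL /* illustrative */
    #define _PAGE_DIRTY 0x2UL
    #define _PAGE_EXEC  0x4UL

    static pte_t __pte(pte_basic_t v) { return (pte_t){ v }; }
    static pte_basic_t pte_val(pte_t p) { return p.pte; }
    static pgprot_t pte_pgprot(pte_t p) { return (pgprot_t){ p.pte }; }

    static int pte_write(pte_t p) { return pte_val(p) & _PAGE_RW; }
    static pte_t pte_mkdirty(pte_t p) { return __pte(pte_val(p) | _PAGE_DIRTY); }
    static pte_t pte_exprotect(pte_t p) { return __pte(pte_val(p) & ~_PAGE_EXEC); }

    int main(void)
    {
            unsigned long flags = _PAGE_RW | _PAGE_EXEC;
            pte_t pte = __pte(flags);  /* raw flags in, typed pte out */

            if (pte_write(pte))
                    pte = pte_mkdirty(pte); /* writeable implies dirty */
            pte = pte_exprotect(pte);       /* no exec leaking out */

            printf("prot = %#lx\n", pte_pgprot(pte).pgprot);
            return 0;
    }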
@@ -235,8 +237,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 		/* The PTE should never be already set nor present in the
 		 * hash table
 		 */
-		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
-		       pgprot_val(prot));
+		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
 	}
 	smp_wmb();
@@ -230,23 +230,23 @@ void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
 void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
 			    unsigned long flags)
 {
+	pte_t pte = __pte(flags);
 	void *caller = __builtin_return_address(0);
 
 	/* writeable implies dirty for kernel addresses */
-	if (flags & _PAGE_WRITE)
-		flags |= _PAGE_DIRTY;
+	if (pte_write(pte))
+		pte = pte_mkdirty(pte);
 
 	/* we don't want to let _PAGE_EXEC leak out */
-	flags &= ~_PAGE_EXEC;
+	pte = pte_exprotect(pte);
 	/*
 	 * Force kernel mapping.
 	 */
-	flags &= ~_PAGE_USER;
-	flags |= _PAGE_PRIVILEGED;
+	pte = pte_mkprivileged(pte);
 
 	if (ppc_md.ioremap)
-		return ppc_md.ioremap(addr, size, __pgprot(flags), caller);
-	return __ioremap_caller(addr, size, __pgprot(flags), caller);
+		return ppc_md.ioremap(addr, size, pte_pgprot(pte), caller);
+	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
 }
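
As in the 32-bit version earlier, pte_mkprivileged() absorbs what used to be two explicit steps here: clearing the user bit and setting the privileged bit (a platform that has only one of the two bits can implement the helper accordingly). A hedged sketch of the folded operation, again with made-up bit values:

    /* Hedged sketch: one helper folding "clear user, set privileged". */
    #include <assert.h>

    #define _PAGE_USER       0x1UL /* illustrative */
    #define _PAGE_PRIVILEGED 0x2UL

    static unsigned long mkprivileged_sketch(unsigned long v)
    {
            return (v & ~_PAGE_USER) | _PAGE_PRIVILEGED;
    }

    int main(void)
    {
            unsigned long v = _PAGE_USER;

            v = mkprivileged_sketch(v);
            assert(!(v & _PAGE_USER) && (v & _PAGE_PRIVILEGED));
            return 0;
    }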