powerpc/mm: Remove the dependency on pte bit position in asm code
We should not expect pte bit positions in asm code. Simply move that part to C.

Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:

committed by
Michael Ellerman

parent
91f1da9979
commit
106713a145
@@ -1206,6 +1206,35 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hash_page);
|
||||
|
||||
/*
 * __hash_page - C entry point for hash-fault handling, called from asm.
 *
 * Translates the raw fault information (effective address, MSR, trap
 * vector, DSISR) into the generic access/flags arguments expected by
 * hash_page_mm(), so the asm caller no longer needs to know PTE bit
 * positions.
 *
 * @ea:    faulting effective address
 * @msr:   MSR value at the time of the fault (MSR_PR distinguishes
 *         user vs. kernel mode)
 * @trap:  exception vector that got us here
 * @dsisr: fault status register contents
 *
 * Returns the result of hash_page_mm() (hash-fault resolution status).
 */
int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
		unsigned long dsisr)
{
	/* Minimum requirement for any access: the page must be present. */
	unsigned long access = _PAGE_PRESENT;
	unsigned long flags = 0;
	struct mm_struct *mm = current->mm;

	/* vmalloc addresses live in the kernel's init_mm, not current->mm. */
	if (REGION_ID(ea) == VMALLOC_REGION_ID)
		mm = &init_mm;

	/*
	 * DSISR_NOHPTE: hardware found no HPTE for this address, so the
	 * update path can skip looking for an existing entry.
	 */
	if (dsisr & DSISR_NOHPTE)
		flags |= HPTE_NOHPTE_UPDATE;

	/* A store fault needs write permission in the PTE. */
	if (dsisr & DSISR_ISSTORE)
		access |= _PAGE_RW;
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
		access |= _PAGE_USER;

	/*
	 * trap 0x400 — instruction access fault, so execute permission is
	 * required. NOTE(review): vector number per the powerpc exception
	 * layout; confirm against the asm caller.
	 */
	if (trap == 0x400)
		access |= _PAGE_EXEC;

	return hash_page_mm(mm, ea, access, trap, flags);
}
|
||||
|
||||
void hash_preload(struct mm_struct *mm, unsigned long ea,
|
||||
unsigned long access, unsigned long trap)
|
||||
{
|
||||
|
Reference in New Issue
Block a user