powerpc: handover page flags with a pgprot_t parameter
In order to avoid multiple conversions, hand a pgprot_t directly to map_kernel_page(), as is already done for radix. Do the same for __ioremap_caller() and __ioremap_at().

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Committed by: Michael Ellerman
Parent: 56f3c1413f
Commit: c766ee7223
@@ -142,7 +142,7 @@ void hash__vmemmap_remove_mapping(unsigned long start,
  * map_kernel_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
+int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
 	pgd_t *pgdp;
 	pud_t *pudp;
@@ -161,8 +161,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag
 		ptep = pte_alloc_kernel(pmdp, ea);
 		if (!ptep)
 			return -ENOMEM;
-		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
-						       __pgprot(flags)));
+		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
 	} else {
 		/*
 		 * If the mm subsystem is not fully up, we cannot create a
@@ -170,7 +169,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag
 		 * entry in the hardware page table.
 		 *
 		 */
-		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
+		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
 				      mmu_io_psize, mmu_kernel_ssize)) {
 			printk(KERN_ERR "Failed to do bolted mapping IO "
 			       "memory at %016lx !\n", pa);
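For illustration only, here is a minimal standalone C sketch (not kernel code) of the conversion-avoidance idea behind this change: a typed pgprot_t travels through the call chain as-is and is unwrapped exactly once where a raw value is needed, instead of being collapsed by the caller and re-wrapped with __pgprot() inside each callee. The pgprot_t struct below mimics the kernel's type-checking definition, and the bolt_mapping() helper and flag bits are made up for the example.

/* sketch.c - illustrative only, not the kernel implementation */
#include <stdio.h>

typedef struct { unsigned long pgprot; } pgprot_t;

#define __pgprot(x)   ((pgprot_t) { (x) })
#define pgprot_val(x) ((x).pgprot)

/* Hypothetical low-level layer that still consumes a raw flags value. */
static int bolt_mapping(unsigned long ea, unsigned long pa, unsigned long flags)
{
	printf("bolt ea=%#lx pa=%#lx flags=%#lx\n", ea, pa, flags);
	return 0;
}

/* Old style: the caller collapses the type to a raw value up front ... */
static int map_kernel_page_old(unsigned long ea, unsigned long pa,
			       unsigned long flags)
{
	/* ... and the callee re-wraps it whenever it wants the type back. */
	pgprot_t prot = __pgprot(flags);

	return bolt_mapping(ea, pa, pgprot_val(prot));
}

/* New style: the typed value is passed through unchanged and unwrapped
 * only at the point that genuinely needs the raw number. */
static int map_kernel_page_new(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	return bolt_mapping(ea, pa, pgprot_val(prot));
}

int main(void)
{
	pgprot_t kernel_prot = __pgprot(0x1abUL);	/* made-up flag bits */

	map_kernel_page_old(0xc000UL, 0x2000UL, pgprot_val(kernel_prot));
	map_kernel_page_new(0xc000UL, 0x2000UL, kernel_prot);
	return 0;
}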