ARM: pgtable: consolidate set_pte_ext(TOP_PTE,...) + tlb flush
A number of places establish a PTE in our top page table and immediately flush the TLB. Rather than repeating this at every call site, provide an inline function for the purpose.

This also changes some global TLB flushes to local ones: each time we set up one of these mappings we do so with preemption disabled, which prevents us from migrating to another CPU.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
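For context, a minimal sketch of what such a consolidated helper might look like. The name set_top_pte matches the call sites in the diff below; the use of top_pmd, pte_offset_kernel() and local_flush_tlb_kernel_page() is an assumption based on the surrounding ARM mm code, which is not shown in this excerpt:

	/*
	 * Sketch only: install @pte for @va in the top page table and
	 * flush the stale TLB entry on the local CPU. The caller is
	 * assumed to have preemption disabled, so a local flush is
	 * sufficient.
	 */
	static inline void set_top_pte(unsigned long va, pte_t pte)
	{
		pte_t *ptep = pte_offset_kernel(top_pmd, va);
		set_pte_ext(ptep, pte, 0);
		local_flush_tlb_kernel_page(va);
	}

Folding the flush into the helper keeps call sites from forgetting it, and using a local flush avoids an unnecessary cross-CPU TLB broadcast for mappings that are only touched on the current CPU.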
@@ -90,11 +90,8 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
 	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
 
-	set_pte_ext(TOP_PTE(kfrom), mk_pte(from, PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(kto), mk_pte(to, PAGE_KERNEL), 0);
-
-	flush_tlb_kernel_page(kfrom);
-	flush_tlb_kernel_page(kto);
+	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
+	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
 
 	copy_page((void *)kto, (void *)kfrom);
 
@@ -119,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
 	 */
 	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to), mk_pte(page, PAGE_KERNEL), 0);
-	flush_tlb_kernel_page(to);
+	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
 	clear_page((void *)to);
 
 	raw_spin_unlock(&v6_lock);