x86/mm/tlb: Revert the recent lazy TLB patches
Revert commits:

  95b0e6357d x86/mm/tlb: Always use lazy TLB mode
  64482aafe5 x86/mm/tlb: Only send page table free TLB flush to lazy TLB CPUs
  ac03158969 x86/mm/tlb: Make lazy TLB mode lazier
  61d0beb579 x86/mm/tlb: Restructure switch_mm_irqs_off()
  2ff6ddf19c x86/mm/tlb: Leave lazy TLB mode at page table free time

In order to simplify the TLB invalidate fixes for x86 and unify the
parts that need backporting. We'll try again later.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
@@ -148,6 +148,22 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 #define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
 #endif

+static inline bool tlb_defer_switch_to_init_mm(void)
+{
+        /*
+         * If we have PCID, then switching to init_mm is reasonably
+         * fast. If we don't have PCID, then switching to init_mm is
+         * quite slow, so we try to defer it in the hopes that we can
+         * avoid it entirely. The latter approach runs the risk of
+         * receiving otherwise unnecessary IPIs.
+         *
+         * This choice is just a heuristic. The tlb code can handle this
+         * function returning true or false regardless of whether we have
+         * PCID.
+         */
+        return !static_cpu_has(X86_FEATURE_PCID);
+}
+
 struct tlb_context {
         u64 ctx_id;
         u64 tlb_gen;
@@ -538,9 +554,4 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
         native_flush_tlb_others(mask, info)
 #endif

-extern void tlb_flush_remove_tables(struct mm_struct *mm);
-extern void tlb_flush_remove_tables_local(void *arg);
-
-#define HAVE_TLB_FLUSH_REMOVE_TABLES
-
 #endif /* _ASM_X86_TLBFLUSH_H */
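The restored tlb_defer_switch_to_init_mm() bases its decision on a single CPU feature: with PCID, switching to init_mm is cheap, so there is no reason to defer it; without PCID, staying lazy and risking an occasional remote flush IPI tends to be cheaper than an immediate full-TLB CR3 reload. As an illustration only (not kernel code), the userspace sketch below reads the same feature bit via CPUID (leaf 1, ECX bit 17) and mirrors the heuristic's decision; the helper names cpu_has_pcid() and defer_switch_to_init_mm() and the standalone program are assumptions for demonstration.

/*
 * Illustration only, userspace: query CPUID.01H:ECX bit 17 (PCID) and
 * reproduce the decision made by tlb_defer_switch_to_init_mm() after this
 * revert: defer the switch to init_mm only when PCID is absent.
 */
#include <stdbool.h>
#include <stdio.h>
#include <cpuid.h>

static bool cpu_has_pcid(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return false;
        return ecx & (1u << 17);        /* CPUID.01H:ECX.PCID */
}

/* Hypothetical stand-in for tlb_defer_switch_to_init_mm(). */
static bool defer_switch_to_init_mm(void)
{
        return !cpu_has_pcid();
}

int main(void)
{
        printf("PCID supported: %s\n", cpu_has_pcid() ? "yes" : "no");
        printf("defer switch to init_mm: %s\n",
               defer_switch_to_init_mm() ? "yes (stay lazy)" : "no (switch now)");
        return 0;
}

As the comment in the hunk above notes, either answer is only a heuristic: the TLB code copes with the function returning true or false regardless of whether PCID is actually available.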