arch, mm: Remove tlb_fast_mode()
Since the introduction of preemptible mmu_gather, TLB fast mode has been
broken. TLB fast mode relies on there being absolutely no concurrency: it
frees pages first and invalidates TLBs later. However, now we can get
concurrency and stuff goes *bang*.

This patch removes all tlb_fast_mode() code; it was found the better
option vs trying to patch the hole by entangling tlb invalidation with
the scheduler.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Reported-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit: 29eb77825c
parent: 2c95523c0f
committed by: Linus Torvalds
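
To see why the ordering is fatal, consider the window fast mode opens:
the page is handed back to the allocator while a stale TLB entry still
maps it. The toy userspace program below sketches that window. It is not
kernel code; struct page, tlb_entry, free_page_toy() and flush_tlb_toy()
are made-up stand-ins for the real machinery.

/* Toy model, not kernel code: tlb_entry stands in for a stale TLB
 * entry that still maps a page the kernel has already freed. */
#include <stdio.h>

struct page { int data; };

static struct page page = { .data = 42 };  /* a mapped user page */
static struct page *tlb_entry = &page;     /* stale translation   */

static void free_page_toy(struct page *p)
{
	p->data = -1;   /* allocator reuses the page for someone else */
}

static void flush_tlb_toy(void)
{
	tlb_entry = NULL;   /* translation invalidated */
}

int main(void)
{
	/* Fast mode's ordering: free first ... */
	free_page_toy(&page);

	/* ... and with a preemptible mmu_gather, another task can run
	 * here, dereference the stale translation, and observe reused
	 * memory: the use-after-free behind "stuff goes *bang*". */
	if (tlb_entry)
		printf("stale access reads %d (use-after-free)\n",
		       tlb_entry->data);

	flush_tlb_toy();   /* ... invalidate later: too late. */
	return 0;
}

The diff below restores the safe order by making the batching path
unconditional.
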
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -46,12 +46,6 @@
 #include <asm/tlbflush.h>
 #include <asm/machvec.h>
 
-#ifdef CONFIG_SMP
-# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
-#else
-# define tlb_fast_mode(tlb)	(1)
-#endif
-
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -60,7 +54,7 @@
 
 struct mmu_gather {
 	struct mm_struct	*mm;
-	unsigned int		nr;	/* == ~0U => fast mode */
+	unsigned int		nr;
 	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
@@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 static inline void
 ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
+	unsigned long i;
 	unsigned int nr;
 
 	if (!tlb->need_flush)
@@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
-	if (!tlb_fast_mode(tlb)) {
-		unsigned long i;
-		tlb->nr = 0;
-		tlb->start_addr = ~0UL;
-		for (i = 0; i < nr; ++i)
-			free_page_and_swap_cache(tlb->pages[i]);
-	}
+
+	tlb->nr = 0;
+	tlb->start_addr = ~0UL;
+	for (i = 0; i < nr; ++i)
+		free_page_and_swap_cache(tlb->pages[i]);
 }
 
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
@@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
-	/*
-	 * Use fast mode if only 1 CPU is online.
-	 *
-	 * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
-	 * doesn't work because of speculative accesses and software prefetching: the page
-	 * table of "mm" may (and usually is) the currently active page table and even
-	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
-	 * compiler might use speculation or lfetch.fault on what happens to be a valid
-	 * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
-	 * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
-	 * problems.  (We could make fast-mode work by switching the current task to a
-	 * different "mm" during the shootdown.) --davidm 08/02/2002
-	 */
-	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+	tlb->nr = 0;
 	tlb->fullmm = full_mm_flush;
 	tlb->start_addr = ~0UL;
 }
@@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	if (!tlb->nr && tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 