sparc64: Reduce TLB flushes during hugepte changes
During hugepage map/unmap, TSB and TLB flushes are currently issued at
every PAGE_SIZE'd boundary, which is unnecessary. We now issue the flush
at REAL_HPAGE_SIZE boundaries only.

Without this patch, workloads which unmap a large hugepage-backed VMA
region get CPU lockups due to excessive TLB flush calls.

Orabug: 22365539, 22643230, 22995196

Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 24e49ee3d7 (parent b1ac6b7b40)
committed by David S. Miller
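The hunks shown below cover only the header-side plumbing; the per-pte
loops that actually benefit (e.g. set_huge_pte_at()) live under
arch/sparc/mm and are not part of this view. As a rough sketch of the
idea, under the assumption that an 8M sparc64 hugepage is backed by two
4M (REAL_HPAGE_SIZE) TLB entries: a setter can store every pte plainly
and queue just two batched flushes, one per 4M half, instead of one per
PAGE_SIZE step. Names below are illustrative, not the actual hunk:

	/* Sketch only: two flushes per hugepage, one per 4M half. */
	static void sketch_set_huge_pte_at(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t entry)
	{
		unsigned long nptes = 1 << HUGETLB_PAGE_ORDER;
		pte_t orig_lo, orig_hi;
		unsigned long i;

		addr &= HPAGE_MASK;
		orig_lo = *ptep;
		orig_hi = *(ptep + nptes / 2);

		for (i = 0; i < nptes; i++) {
			*ptep = entry;	/* plain store, no flush here */
			ptep++;
			addr += PAGE_SIZE;
			pte_val(entry) += PAGE_SIZE;
		}

		/* One TSB/TLB flush per REAL_HPAGE_SIZE region. */
		addr -= REAL_HPAGE_SIZE;
		ptep -= nptes / 2;
		maybe_tlb_batch_add(mm, addr, ptep, orig_hi, 0);

		addr -= REAL_HPAGE_SIZE;
		ptep -= nptes / 2;
		maybe_tlb_batch_add(mm, addr, ptep, orig_lo, 0);
	}

The two maybe_tlb_batch_add() calls mirror the two REAL_HPAGE_SIZE
halves; everything in between is plain stores, which is what removes the
per-pte flush traffic behind the reported lockups.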
arch/sparc/include/asm/pgtable_64.h

@@ -375,7 +375,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
 #define pgprot_noncached pgprot_noncached
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline pte_t pte_mkhuge(pte_t pte)
+static inline unsigned long __pte_huge_mask(void)
 {
 	unsigned long mask;
 
@@ -390,8 +390,19 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	: "=r" (mask)
 	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
 
-	return __pte(pte_val(pte) | mask);
+	return mask;
 }
 
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) | __pte_huge_mask());
+}
+
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return !!(pte_val(pte) & __pte_huge_mask());
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
@@ -403,6 +414,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 	return __pmd(pte_val(pte));
 }
 #endif
+#else
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return false;
+}
 #endif
 
 static inline pte_t pte_mkdirty(pte_t pte)
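Factoring __pte_huge_mask() out of pte_mkhuge() lets is_hugetlb_pte()
reuse the same 4U/4V mask patching, and the #else stub keeps callers
unconditional: without hugepage support the test compiles to false. A
sketch of how the flush-batching path can use it together with the
tb->huge flag added further below (the actual arch/sparc/mm/tlb.c hunk
is not shown here, so the details are assumed):

	/* Assumed sketch, not the actual tlb.c change: a batch must
	 * stay homogeneous in page size, so drain it before queueing
	 * an entry of the other size class.
	 */
	static void sketch_queue_flush(struct tlb_batch *tb,
				       unsigned long vaddr, pte_t orig)
	{
		bool huge = is_hugetlb_pte(orig);

		if (tb->tlb_nr && tb->huge != huge)
			flush_tlb_pending();	/* drain mixed-size batch */

		tb->huge = huge;
		tb->vaddrs[tb->tlb_nr++] = vaddr;
	}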
@@ -858,6 +874,19 @@ static inline unsigned long pud_pfn(pud_t pud)
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm);
 
+static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+				pte_t *ptep, pte_t orig, int fullmm)
+{
+	/* It is more efficient to let flush_tlb_kernel_range()
+	 * handle init_mm tlb flushes.
+	 *
+	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+	 * and SUN4V pte layout, so this inline test is fine.
+	 */
+	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
+		tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
+}
+
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr,
@@ -874,15 +903,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	pte_t orig = *ptep;
 
 	*ptep = pte;
-
-	/* It is more efficient to let flush_tlb_kernel_range()
-	 * handle init_mm tlb flushes.
-	 *
-	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
-	 * and SUN4V pte layout, so this inline test is fine.
-	 */
-	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
-		tlb_batch_add(mm, addr, ptep, orig, fullmm);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 
 #define set_pte_at(mm,addr,ptep,pte) \
arch/sparc/include/asm/tlbflush_64.h

@@ -8,6 +8,7 @@
 #define TLB_BATCH_NR 192
 
 struct tlb_batch {
+	bool huge;
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
 	unsigned long active;
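The new huge flag makes a pending batch size-aware, so the eventual
flush only has to walk the TSB that can actually contain the queued
entries instead of probing both per address. A sketch of the consumer
(the arch/sparc/mm/tsb.c hunk is not shown in this view, and the helper
name here is hypothetical):

	/* Hypothetical helper, for illustration only. */
	void sketch_flush_tsb_user(struct tlb_batch *tb)
	{
		if (tb->huge)
			sketch_flush_one_tsb(tb, MM_TSB_HUGE);	/* huge entries */
		else
			sketch_flush_one_tsb(tb, MM_TSB_BASE);	/* 8K entries */
	}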
@@ -16,7 +17,7 @@ struct tlb_batch {
 
 void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 void flush_tsb_user(struct tlb_batch *tb);
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
 
 /* TLB flush operations. */
 
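flush_tsb_user_page() gets the same treatment for the non-batched path:
the caller already holds the old pte, so it can pass the size class down
rather than have the TSB code check both TSBs for every address. A
sketch of an updated call site (assumed; the real call sites under
arch/sparc/mm are not shown here):

	/* Assumed sketch of a call site under the new signature. */
	static void sketch_flush_one(struct mm_struct *mm,
				     unsigned long vaddr, pte_t orig)
	{
		flush_tsb_user_page(mm, vaddr, is_hugetlb_pte(orig));
	}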