Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:
 "Some 32-bit kgdb cleanups from Sam Ravnborg, and a hugepage TLB flush
  overhead fix on 64-bit from Nitin Gupta"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Reduce TLB flushes during hugepte changes
  aeroflex/greth: fix warning about unused variable
  openprom: fix warning
  sparc32: drop superfluous cast in calls to __nocache_pa()
  sparc32: fix build with STRICT_MM_TYPECHECKS
  sparc32: use proper prototype for trapbase
  sparc32: drop local prototype in kgdb_32
  sparc32: drop hardcoding trap_level in kgdb_trap
Linus Torvalds committed on 2016-05-22 19:24:13 -07:00
19 changed files with 147 additions and 110 deletions

View File

@@ -43,10 +43,10 @@
 	nop;
 #ifdef CONFIG_KGDB
-#define KGDB_TRAP(num) \
-	b kgdb_trap_low; \
-	rd %psr,%l0; \
-	nop; \
+#define KGDB_TRAP(num) \
+	mov num, %l7; \
+	b kgdb_trap_low; \
+	rd %psr,%l0; \
 	nop;
 #else
 #define KGDB_TRAP(num) \

View File

@@ -28,10 +28,10 @@ enum regnames {
 #define NUMREGBYTES ((GDB_CSR + 1) * 4)
 #else
 #define NUMREGBYTES ((GDB_Y + 1) * 8)
 #endif
+struct pt_regs;
+asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs);
 #endif
 void arch_kgdb_breakpoint(void);

View File

@@ -69,7 +69,6 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
 #define __pte(x) ((pte_t) { (x) } )
 #define __iopte(x) ((iopte_t) { (x) } )
-/* #define __pmd(x) ((pmd_t) { (x) } ) */ /* XXX procedure with loop */
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __ctxd(x) ((ctxd_t) { (x) } )
 #define __pgprot(x) ((pgprot_t) { (x) } )
@@ -97,7 +96,6 @@ typedef unsigned long iopgprot_t;
 #define __pte(x) (x)
 #define __iopte(x) (x)
-/* #define __pmd(x) (x) */ /* XXX later */
 #define __pgd(x) (x)
 #define __ctxd(x) (x)
 #define __pgprot(x) (x)
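
For context on the STRICT_MM_TYPECHECKS build fix: when the page-table types are the single-member structs shown in the first branch above, plain bitwise operators on them no longer compile, which is why the two hunks below go through __pte() and pgprot_val() instead of operating on the bare values. A stand-alone sketch of that pattern, using hypothetical stand-in definitions rather than the real kernel headers:

#include <stdio.h>

/* Hypothetical stand-ins for the STRICT_MM_TYPECHECKS-style typedefs above. */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

#define SRMMU_CACHE	0x80	/* illustrative flag value, not the real constant */

static pgprot_t pgprot_noncached(pgprot_t prot)
{
	/* "prot &= ~__pgprot(SRMMU_CACHE);" does not compile here, because
	 * there is no &= operator for struct types.  Going through the value
	 * accessor works whether or not the type is wrapped in a struct.
	 */
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}

int main(void)
{
	pgprot_t prot = __pgprot(0xff);

	prot = pgprot_noncached(prot);
	printf("0x%lx\n", pgprot_val(prot));	/* prints 0x7f */
	return 0;
}

With the plain-value typedefs of the second branch, the same accessor-based code presumably still compiles, since the accessor collapses to the value itself.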

View File

@@ -29,9 +29,9 @@ static inline void free_pgd_fast(pgd_t *pgd)
 static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 {
-	unsigned long pa = __nocache_pa((unsigned long)pmdp);
+	unsigned long pa = __nocache_pa(pmdp);
-	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (pa >> 4)));
+	set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (pa >> 4))));
 }
 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)

View File

@@ -298,7 +298,7 @@ static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
 #define pgprot_noncached pgprot_noncached
 static inline pgprot_t pgprot_noncached(pgprot_t prot)
 {
-	prot &= ~__pgprot(SRMMU_CACHE);
+	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
 	return prot;
 }

View File

@@ -375,7 +375,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
 #define pgprot_noncached pgprot_noncached
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline pte_t pte_mkhuge(pte_t pte)
+static inline unsigned long __pte_huge_mask(void)
 {
 	unsigned long mask;
@@ -390,8 +390,19 @@ static inline pte_t pte_mkhuge(pte_t pte)
: "=r" (mask)
: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
return __pte(pte_val(pte) | mask);
return mask;
}
static inline pte_t pte_mkhuge(pte_t pte)
{
return __pte(pte_val(pte) | __pte_huge_mask());
}
static inline bool is_hugetlb_pte(pte_t pte)
{
return !!(pte_val(pte) & __pte_huge_mask());
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
@@ -403,6 +414,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 	return __pmd(pte_val(pte));
 }
 #endif
+#else
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return false;
+}
 #endif
 static inline pte_t pte_mkdirty(pte_t pte)
@@ -856,6 +872,19 @@ static inline unsigned long pud_pfn(pud_t pud)
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm);
+static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+				pte_t *ptep, pte_t orig, int fullmm)
+{
+	/* It is more efficient to let flush_tlb_kernel_range()
+	 * handle init_mm tlb flushes.
+	 *
+	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+	 * and SUN4V pte layout, so this inline test is fine.
+	 */
+	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
+		tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
+}
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr,
@@ -872,15 +901,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	pte_t orig = *ptep;
 	*ptep = pte;
-	/* It is more efficient to let flush_tlb_kernel_range()
-	 * handle init_mm tlb flushes.
-	 *
-	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
-	 * and SUN4V pte layout, so this inline test is fine.
-	 */
-	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
-		tlb_batch_add(mm, addr, ptep, orig, fullmm);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 #define set_pte_at(mm,addr,ptep,pte) \
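
The helpers above are one half of the "Reduce TLB flushes during hugepte changes" fix; the other half is the struct tlb_batch and flush_tsb_user_page() change below, which tags a flush batch as huge. Since one huge mapping is backed by many consecutive PTE slots that share a single translation, recognizing them via is_hugetlb_pte() lets the batching code avoid queueing a separate flush for every slot. A self-contained, hypothetical sketch of that batching idea (invented names and sizes, not the sparc64 code):

#include <stdbool.h>
#include <stdio.h>

#define BATCH_NR	8	/* hypothetical batch size (the kernel's is TLB_BATCH_NR) */

/* Hypothetical flush batch, loosely modelled on struct tlb_batch above. */
struct flush_batch {
	bool huge;			/* queued entries map huge pages */
	unsigned long nr;
	unsigned long vaddrs[BATCH_NR];
};

static void flush_now(struct flush_batch *b)
{
	printf("flush %lu %s entr%s\n", b->nr,
	       b->huge ? "huge" : "base", b->nr == 1 ? "y" : "ies");
	b->nr = 0;
}

/* Queue one address; flush early if the size class changes or the batch fills. */
static void batch_add(struct flush_batch *b, unsigned long vaddr, bool huge)
{
	if (b->nr && b->huge != huge)
		flush_now(b);
	b->huge = huge;
	b->vaddrs[b->nr++] = vaddr;
	if (b->nr == BATCH_NR)
		flush_now(b);
}

int main(void)
{
	struct flush_batch b = { .nr = 0 };
	unsigned long base = 0x10000000UL;

	/* One hypothetical huge mapping backed by many sub-PTE slots: queue a
	 * single flush for the shared translation instead of one per slot --
	 * the overhead this series trims. */
	batch_add(&b, base, true);

	if (b.nr)
		flush_now(&b);
	return 0;
}

Keeping the flag per batch also mirrors why flush_tsb_user_page() grows a bool huge argument: on sparc64, huge and base-page translations are tracked in separate TSBs, so the flush path presumably needs to know which kind a batch holds.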

View File

@@ -8,6 +8,7 @@
 #define TLB_BATCH_NR 192
 struct tlb_batch {
+	bool huge;
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
 	unsigned long active;
@@ -16,7 +17,7 @@ struct tlb_batch {
 void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 void flush_tsb_user(struct tlb_batch *tb);
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
 /* TLB flush operations. */