Merge tag 'xtensa-next-20130710' of git://github.com/czankel/xtensa-linux
Pull Xtensa updates from Chris Zankel. * tag 'xtensa-next-20130710' of git://github.com/czankel/xtensa-linux: (22 commits) xtensa: remove the second argument of __bio_kmap_atomic() xtensa: add static function tracer support xtensa: Flat DeviceTree copy not future-safe xtensa: check TLB sanity on return to userspace xtensa: adjust boot parameters address when INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is selected xtensa: bootparams: fix typo xtensa: tell git to ignore generated .dtb files xtensa: ccount based sched_clock xtensa: ccount based clockevent implementation xtensa: consolidate ccount access routines xtensa: cleanup ccount frequency tracking xtensa: timex.h: remove unused symbols xtensa: tell git to ignore copied zlib source files xtensa: fix section mismatch in pcibios_fixup_bus xtensa: ISS: fix section mismatch in iss_net_setup arch: xtensa: include: asm: compiling issue, need cmpxchg64() defined. xtensa: xtfpga: fix section mismatch xtensa: remove unused platform_init_irq() xtensa: tell git to ignore generated files xtensa: flush TLB entries for pages of non-current mm correctly ...
This commit is contained in:
@@ -64,7 +64,7 @@ void flush_tlb_mm(struct mm_struct *mm)
|
||||
{
|
||||
if (mm == current->active_mm) {
|
||||
unsigned long flags;
|
||||
local_save_flags(flags);
|
||||
local_irq_save(flags);
|
||||
__get_new_mmu_context(mm);
|
||||
__load_mmu_context(mm);
|
||||
local_irq_restore(flags);
|
||||
@@ -94,7 +94,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
|
||||
printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
|
||||
(unsigned long)mm->context, start, end);
|
||||
#endif
|
||||
local_save_flags(flags);
|
||||
local_irq_save(flags);
|
||||
|
||||
if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
|
||||
int oldpid = get_rasid_register();
|
||||
@@ -128,9 +128,10 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
|
||||
if(mm->context == NO_CONTEXT)
|
||||
return;
|
||||
|
||||
local_save_flags(flags);
|
||||
local_irq_save(flags);
|
||||
|
||||
oldpid = get_rasid_register();
|
||||
set_rasid_register(ASID_INSERT(mm->context));
|
||||
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
invalidate_itlb_mapping(page);
|
||||
@@ -140,3 +141,116 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_TLB_SANITY
|
||||
|
||||
static unsigned get_pte_for_vaddr(unsigned vaddr)
|
||||
{
|
||||
struct task_struct *task = get_current();
|
||||
struct mm_struct *mm = task->mm;
|
||||
pgd_t *pgd;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
|
||||
if (!mm)
|
||||
mm = task->active_mm;
|
||||
pgd = pgd_offset(mm, vaddr);
|
||||
if (pgd_none_or_clear_bad(pgd))
|
||||
return 0;
|
||||
pmd = pmd_offset(pgd, vaddr);
|
||||
if (pmd_none_or_clear_bad(pmd))
|
||||
return 0;
|
||||
pte = pte_offset_map(pmd, vaddr);
|
||||
if (!pte)
|
||||
return 0;
|
||||
return pte_val(*pte);
|
||||
}
|
||||
|
||||
enum {
|
||||
TLB_SUSPICIOUS = 1,
|
||||
TLB_INSANE = 2,
|
||||
};
|
||||
|
||||
/* Fatal TLB inconsistency found: stop the kernel with a BUG report. */
static void tlb_insane(void)
{
	/* BUG() is the idiomatic form of the original BUG_ON(1). */
	BUG();
}
|
||||
|
||||
/* Suspicious (but not provably wrong) TLB state: warn with a stack dump
 * and continue running. */
static void tlb_suspicious(void)
{
	WARN_ON(1);
}
|
||||
|
||||
/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or PTE is
 * marked as non-present. Non-present PTE and the page with non-zero refcount
 * and zero mapcount is normal for batched TLB flush operation. Zero refcount
 * means that the page was freed prematurely. Non-zero mapcount is unusual,
 * but does not necessarily mean an error, thus marked as suspicious.
 *
 * @w: TLB way, @e: entry index within the way, @dtlb: true = DTLB, false = ITLB.
 * Returns an OR of TLB_INSANE / TLB_SUSPICIOUS flags, 0 if the entry is sane.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	/* Pack way and entry into the index format the TLB read ops expect. */
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	/* Reconstruct the full virtual page number from the entry's VPN
	 * field plus the entry index. */
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	/* NOTE(review): the >> 8 appears to select the RASID field holding
	 * the current mm's ASID — confirm against the xtensa MMU spec. */
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	/* Kernel ASID must map kernel addresses and vice versa; a mismatch
	 * is always fatal.  ASID 0 (invalid entry) is exempt. */
	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	/* Compare the entry's translation with the page table only when it
	 * belongs to the currently active address space. */
	if (tlb_asid == mm_asid) {
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				/* Freed page still in the TLB: fatal.  Still
				 * mapped elsewhere: could be a pending batched
				 * flush, so only suspicious. */
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapped(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				/* Present PTE pointing at a different PA than
				 * the TLB: fatal. */
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}
|
||||
|
||||
void check_tlb_sanity(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned w, e;
|
||||
int bug = 0;
|
||||
|
||||
local_irq_save(flags);
|
||||
for (w = 0; w < DTLB_ARF_WAYS; ++w)
|
||||
for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
|
||||
bug |= check_tlb_entry(w, e, true);
|
||||
for (w = 0; w < ITLB_ARF_WAYS; ++w)
|
||||
for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
|
||||
bug |= check_tlb_entry(w, e, false);
|
||||
if (bug & TLB_INSANE)
|
||||
tlb_insane();
|
||||
if (bug & TLB_SUSPICIOUS)
|
||||
tlb_suspicious();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_DEBUG_TLB_SANITY */
|
||||
|
Reference in a new issue
Block a user