android_kernel_xiaomi_sm8450/arch/nios2/mm/cacheflush.c
Commit ef5cbcb6bf (Nicholas Piggin): nios2: update_mmu_cache clear the old entry from the TLB
Fault paths like do_read_fault will install a Linux pte with the young
bit clear. The CPU will fault again because the TLB has not been
updated, this time a valid pte exists so handle_pte_fault will just
set the young bit with ptep_set_access_flags, which flushes the TLB.

The TLB is flushed so the next attempt will go to the fast TLB handler
which loads the TLB with the new Linux pte. The access then proceeds.

This design is fragile in that it depends on the young bit being clear after
the initial Linux fault. A proposed core mm change that sets the young bit
immediately upon such a fault results in ptep_set_access_flags not flushing
the TLB, because it finds no change to the pte. The spurious-fault fix path
only flushes the TLB if the access was a store; if it was a load, the result
is an infinite loop of page faults.

This change adds a TLB flush in update_mmu_cache, which removes that
TLB entry upon the first fault. This will cause the fast TLB handler
to load the new pte and avoid the Linux page fault entirely.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Ley Foon Tan <ley.foon.tan@intel.com>
2019-03-07 05:29:35 +08:00
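
To make the failure mode concrete, below is a small userspace toy model of the
sequence described above. It is an illustration only, not kernel code: the state
variables merely stand in for the nios2 TLB, the fast TLB-miss handler and the
generic fault path (do_read_fault/handle_pte_fault/ptep_set_access_flags), and it
assumes the proposed core mm change that sets the young bit when the pte is first
installed. With the flush this commit adds in update_mmu_cache, the load completes
after a single Linux fault; without it, a load keeps faulting.

/*
 * Toy userspace model of the fault sequence described in the commit message.
 * Illustration only: none of this is real kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

enum tlb_state { TLB_EMPTY, TLB_STALE, TLB_MAPPED };

static enum tlb_state tlb;
static bool pte_present, pte_young;
static bool flush_in_update_mmu_cache;	/* models the behaviour this commit adds */

static void linux_page_fault(void)
{
	if (!pte_present) {
		/*
		 * Model of do_read_fault(): install the pte; the proposed core
		 * mm change also sets the young bit immediately.
		 */
		pte_present = true;
		pte_young = true;
		if (flush_in_update_mmu_cache)
			tlb = TLB_EMPTY;	/* flush_tlb_page() in update_mmu_cache */
		return;
	}
	if (!pte_young) {
		pte_young = true;	/* ptep_set_access_flags() ...            */
		tlb = TLB_EMPTY;	/* ... flushes because the pte changed    */
		return;
	}
	/* pte unchanged: no flush, and loads take no spurious-fault flush. */
}

/* One hardware load; returns true when the access completes. */
static bool cpu_load(void)
{
	if (tlb == TLB_MAPPED)
		return true;
	if (tlb == TLB_EMPTY && pte_present && pte_young) {
		tlb = TLB_MAPPED;	/* fast TLB handler loads the Linux pte */
		return true;
	}
	tlb = TLB_STALE;		/* stale entry remains; take a Linux fault */
	linux_page_fault();
	return false;
}

int main(void)
{
	int mode, faults;

	for (mode = 0; mode < 2; mode++) {
		flush_in_update_mmu_cache = mode;
		tlb = TLB_EMPTY;
		pte_present = pte_young = false;
		for (faults = 0; faults < 10 && !cpu_load(); faults++)
			;
		printf("flush in update_mmu_cache=%d: %s after %d fault(s)\n",
		       mode, faults < 10 ? "load completes" : "still faulting",
		       faults);
	}
	return 0;
}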


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ (" flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs */ "r"(addr)
					/* : No clobber */);
	}
}
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ (" initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs */ "r"(addr)
					/* : No clobber */);
	}
}
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ (" flushi %0\n"
					: /* Outputs */
					: /* Inputs */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile(" flushp\n");
}
static void flush_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
			page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
void flush_cache_all(void)
{
	__flush_dcache(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page. This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	unsigned long start = (unsigned long)page_address(page);

	__flush_dcache(start, start + PAGE_SIZE);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);

	/* Flush this page if there are aliases. */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		__flush_dcache_page(mapping, page);
		if (mapping) {
			unsigned long start = (unsigned long)page_address(page);
			flush_aliases(mapping, page);
			flush_icache_range(start, start + PAGE_SIZE);
		}
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t *pte)
{
	unsigned long pfn = pte_pfn(*pte);
	struct page *page;
	struct address_space *mapping;

	/*
	 * Flush any stale TLB entry left by the fault path, so the next
	 * access is handled by the fast TLB handler with the new pte
	 * (see the commit message above).
	 */
	flush_tlb_page(vma, address);

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (mapping) {
		flush_aliases(mapping, page);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_page(vma, page);
	}
}
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long user_vaddr,
			 void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long user_vaddr,
		       void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}