Merge branch 'next/nommu' into for-next

Conflicts:
	arch/riscv/boot/Makefile
	arch/riscv/include/asm/sbi.h
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -6,9 +6,8 @@ CFLAGS_REMOVE_init.o = -pg
 endif
 
 obj-y += init.o
-obj-y += fault.o
 obj-y += extable.o
-obj-y += ioremap.o
+obj-$(CONFIG_MMU) += fault.o ioremap.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += sifive_l2_cache.o
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -10,9 +10,17 @@
 
 #include <asm/sbi.h>
 
+static void ipi_remote_fence_i(void *info)
+{
+	return local_flush_icache_all();
+}
+
 void flush_icache_all(void)
 {
-	sbi_remote_fence_i(NULL);
+	if (IS_ENABLED(CONFIG_RISCV_SBI))
+		sbi_remote_fence_i(NULL);
+	else
+		on_each_cpu(ipi_remote_fence_i, NULL, 1);
 }
 
 /*
@@ -28,7 +36,7 @@ void flush_icache_all(void)
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
 	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
+	cpumask_t others, *mask;
 
 	preempt_disable();
 
@@ -46,10 +54,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 	 */
 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
 	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local) {
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
-	} else {
+	if (mm == current->active_mm && local) {
 		/*
 		 * It's assumed that at least one strongly ordered operation is
 		 * performed on this hart between setting a hart's cpumask bit
@@ -59,6 +64,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 		 * with flush_icache_deferred().
 		 */
 		smp_mb();
+	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+		cpumask_t hartid_mask;
+
+		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+	} else {
+		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
 	}
 
 	preempt_enable();
@@ -66,6 +78,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_MMU
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
@@ -73,3 +86,4 @@ void flush_icache_pte(pte_t pte)
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		flush_icache_all();
 }
+#endif /* CONFIG_MMU */
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -58,8 +58,10 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));
 
+#ifdef CONFIG_MMU
 	csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
 	local_flush_tlb_all();
+#endif
 
 	flush_icache_deferred(next);
 }
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -15,9 +15,9 @@ int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;
 
-	fixup = search_exception_tables(regs->sepc);
+	fixup = search_exception_tables(regs->epc);
 	if (fixup) {
-		regs->sepc = fixup->fixup;
+		regs->epc = fixup->fixup;
 		return 1;
 	}
 	return 0;
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -34,8 +34,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	int code = SEGV_MAPERR;
 	vm_fault_t fault;
 
-	cause = regs->scause;
-	addr = regs->sbadaddr;
+	cause = regs->cause;
+	addr = regs->badaddr;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -53,7 +53,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 		goto vmalloc_fault;
 
 	/* Enable interrupts if they were enabled in the parent context. */
-	if (likely(regs->sstatus & SR_SPIE))
+	if (likely(regs->status & SR_PIE))
 		local_irq_enable();
 
 	/*
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -26,6 +26,7 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 EXPORT_SYMBOL(empty_zero_page);
 
 extern char _start[];
+void *dtb_early_va;
 
 static void __init zone_sizes_init(void)
 {
@@ -40,7 +41,7 @@ static void __init zone_sizes_init(void)
 	free_area_init_nodes(max_zone_pfns);
 }
 
-void setup_zero_page(void)
+static void setup_zero_page(void)
 {
 	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 }
@@ -142,12 +143,12 @@ void __init setup_bootmem(void)
 	}
 }
 
+#ifdef CONFIG_MMU
 unsigned long va_pa_offset;
 EXPORT_SYMBOL(va_pa_offset);
 unsigned long pfn_base;
 EXPORT_SYMBOL(pfn_base);
 
-void *dtb_early_va;
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
@@ -444,6 +445,16 @@ static void __init setup_vm_final(void)
 	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
 	local_flush_tlb_all();
 }
+#else
+asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+{
+	dtb_early_va = (void *)dtb_pa;
+}
+
+static inline void setup_vm_final(void)
+{
+}
+#endif /* CONFIG_MMU */
 
 void __init paging_init(void)
 {
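
Note: the recurring idiom in the hunks above is runtime-looking selection on IS_ENABLED(CONFIG_RISCV_SBI), which keeps both branches visible to the compiler and type-checked while the branch for a disabled option is constant-folded away. Below is a minimal standalone sketch of that idiom only; CONFIG_DEMO_SBI, flush_via_firmware() and flush_via_ipi() are hypothetical names for illustration and are not part of this diff.

/*
 * Sketch of the IS_ENABLED() selection pattern used by flush_icache_all()
 * in the diff above. All CONFIG_DEMO_* symbols and helper functions here
 * are made up for illustration.
 */
#include <linux/kconfig.h>	/* IS_ENABLED() */

static void flush_via_firmware(void)
{
	/* stand-in for a firmware path, e.g. sbi_remote_fence_i(NULL) */
}

static void flush_via_ipi(void)
{
	/* stand-in for an IPI path, e.g. on_each_cpu(ipi_remote_fence_i, NULL, 1) */
}

static void demo_flush_icache_all(void)
{
	/*
	 * IS_ENABLED(CONFIG_DEMO_SBI) expands to 1 or 0 at compile time,
	 * so the untaken branch is discarded by the optimizer, yet both
	 * call sites are still parsed and type-checked in every config.
	 */
	if (IS_ENABLED(CONFIG_DEMO_SBI))
		flush_via_firmware();
	else
		flush_via_ipi();
}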