Merge tag 'riscv-for-linus-5.2-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux

Pull RISC-V updates from Palmer Dabbelt:
 "This contains an assortment of RISC-V related patches that I'd like to
  target for the 5.2 merge window. Most of the patches are cleanups, but
  there are a handful of user-visible changes:

   - The nosmp and nr_cpus command-line arguments are now supported,
     and behave as they do on other architectures.

   - The SBI console no longer installs itself as a preferred console;
     we rely on the standard mechanisms (/chosen, command-line,
     heuristics) instead.

   - sbi_remote_sfence_vma{,_asid} now pass their arguments along to
     the SBI call (see the sketch below the log message).

   - Modules now support BUG().

   - A missing sfence.vma has been added to the boot code. The bug it
     fixes only manifests during boot.

   - The arch/riscv support for SiFive's L2 cache controller has been
     merged, which should un-block the EDAC framework work.

  I've only tested this on QEMU again, as I didn't have time to get
  things running on the Unleashed. The latest master from this morning
  merges in cleanly and passes the tests as well"
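
A rough sketch of the sbi_remote_sfence_vma{,_asid} fix mentioned above:
before this merge the wrappers discarded start/size, so the SBI saw whatever
happened to be in those argument registers. A minimal sketch, assuming the
SBI_CALL_* helpers in arch/riscv/include/asm/sbi.h (the exact macro names
here are from memory and may differ):

    static inline void sbi_remote_sfence_vma(const unsigned long *hart_mask,
                                             unsigned long start,
                                             unsigned long size)
    {
            /* Previously SBI_CALL_1(SBI_REMOTE_SFENCE_VMA, hart_mask),
             * which silently dropped start and size. */
            SBI_CALL_3(SBI_REMOTE_SFENCE_VMA, hart_mask, start, size);
    }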

* tag 'riscv-for-linus-5.2-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux: (31 commits)
  riscv: fix locking violation in page fault handler
  RISC-V: sifive_l2_cache: Add L2 cache controller driver for SiFive SoCs
  RISC-V: Add DT documentation for SiFive L2 Cache Controller
  RISC-V: Avoid using invalid intermediate translations
  riscv: Support BUG() in kernel module
  riscv: Add the support for c.ebreak check in is_valid_bugaddr()
  riscv: support trap-based WARN()
  riscv: fix sbi_remote_sfence_vma{,_asid}.
  riscv: move switch_mm to its own file
  riscv: move flush_icache_{all,mm} to cacheflush.c
  tty: Don't force RISCV SBI console as preferred console
  RISC-V: Access CSRs using CSR numbers
  RISC-V: Add interrupt related SCAUSE defines in asm/csr.h
  RISC-V: Use tabs to align macro values in asm/csr.h
  RISC-V: Fix minor checkpatch issues.
  RISC-V: Support nr_cpus command line option.
  RISC-V: Implement nosmp commandline option.
  RISC-V: Add RISC-V specific arch_match_cpu_phys_id
  riscv: vdso: drop unnecessary cc-ldoption
  riscv: call pm_power_off from machine_halt / machine_power_off
  ...
Committed by: Linus Torvalds, 2019-05-19 09:56:36 -07:00
commit b0bb1269b9
36 files changed, 632 insertions(+), 318 deletions(-)

arch/riscv/mm/Makefile

@@ -9,3 +9,5 @@ obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
 obj-y += cacheflush.o
+obj-y += context.o
+obj-y += sifive_l2_cache.o

arch/riscv/mm/cacheflush.c

@@ -14,6 +14,67 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+void flush_icache_all(void)
+{
+        sbi_remote_fence_i(NULL);
+}
+
+/*
+ * Performs an icache flush for the given MM context.  RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+        unsigned int cpu;
+        cpumask_t others, hmask, *mask;
+
+        preempt_disable();
+
+        /* Mark every hart's icache as needing a flush for this MM. */
+        mask = &mm->context.icache_stale_mask;
+        cpumask_setall(mask);
+        /* Flush this hart's I$ now, and mark it as flushed. */
+        cpu = smp_processor_id();
+        cpumask_clear_cpu(cpu, mask);
+        local_flush_icache_all();
+
+        /*
+         * Flush the I$ of other harts concurrently executing, and mark them
+         * as flushed.
+         */
+        cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+        local |= cpumask_empty(&others);
+        if (mm != current->active_mm || !local) {
+                cpumask_clear(&hmask);
+                riscv_cpuid_to_hartid_mask(&others, &hmask);
+                sbi_remote_fence_i(hmask.bits);
+        } else {
+                /*
+                 * It's assumed that at least one strongly ordered operation is
+                 * performed on this hart between setting a hart's cpumask bit
+                 * and scheduling this MM context on that hart.  Sending an SBI
+                 * remote message will do this, but in the case where no
+                 * messages are sent we still need to order this hart's writes
+                 * with flush_icache_deferred().
+                 */
+                smp_mb();
+        }
+
+        preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
 void flush_icache_pte(pte_t pte)
 {
         struct page *page = pte_page(pte);

arch/riscv/mm/context.c (new file)

@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/mm.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU.  RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches.  To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart.  This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+        unsigned int cpu = smp_processor_id();
+        cpumask_t *mask = &mm->context.icache_stale_mask;
+
+        if (cpumask_test_cpu(cpu, mask)) {
+                cpumask_clear_cpu(cpu, mask);
+                /*
+                 * Ensure the remote hart's writes are visible to this hart.
+                 * This pairs with a barrier in flush_icache_mm.
+                 */
+                smp_mb();
+                local_flush_icache_all();
+        }
+#endif
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+        struct task_struct *task)
+{
+        unsigned int cpu;
+
+        if (unlikely(prev == next))
+                return;
+
+        /*
+         * Mark the current MM context as inactive, and the next as
+         * active.  This is at least used by the icache flushing
+         * routines in order to determine who should be flushed.
+         */
+        cpu = smp_processor_id();
+
+        cpumask_clear_cpu(cpu, mm_cpumask(prev));
+        cpumask_set_cpu(cpu, mm_cpumask(next));
+
+        /*
+         * Use the old sptbr name instead of using the current satp
+         * name to support binutils 2.29 which doesn't know about the
+         * privileged ISA 1.10 yet.
+         */
+        csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+        local_flush_tlb_all();
+
+        flush_icache_deferred(next);
+}

arch/riscv/mm/fault.c

@@ -229,8 +229,9 @@ vmalloc_fault:
                 pte_t *pte_k;
                 int index;
 
+                /* User mode accesses just cause a SIGSEGV */
                 if (user_mode(regs))
-                        goto bad_area;
+                        return do_trap(regs, SIGSEGV, code, addr, tsk);
 
                 /*
                  * Synchronize this task's top level page-table
@@ -239,13 +240,9 @@ vmalloc_fault:
                  * Do _not_ use "tsk->active_mm->pgd" here.
                  * We might be inside an interrupt in the middle
                  * of a task switch.
-                 *
-                 * Note: Use the old sptbr name instead of using the current
-                 * satp name to support binutils 2.29 which doesn't know about
-                 * the privileged ISA 1.10 yet.
                  */
                 index = pgd_index(addr);
-                pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
+                pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
                 pgd_k = init_mm.pgd + index;
 
                 if (!pgd_present(*pgd_k))
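
The csr_read(CSR_SATP) change works because the csr_* macros stringify their
argument directly into the asm, so a numeric CSR address assembles even with
binutils that predate the satp name, making the old sptbr workaround (and its
comment) unnecessary. A minimal sketch of the pattern, assuming an __ASM_STR
stringification helper; CSR_SATP's value is the privileged-spec encoding, and
the exact kernel macro body may differ:

    #define CSR_SATP        0x180   /* satp's CSR number in the privileged spec */

    #define csr_read(csr)                                           \
    ({                                                              \
            register unsigned long __v;                             \
            __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr)        \
                                  : "=r" (__v) : : "memory");       \
            __v;                                                    \
    })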

arch/riscv/mm/sifive_l2_cache.c (new file)

@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive L2 cache controller Driver
+ *
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <asm/sifive_l2_cache.h>
+
+#define SIFIVE_L2_DIRECCFIX_LOW 0x100
+#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
+#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_L2_DATECCFIX_LOW 0x140
+#define SIFIVE_L2_DATECCFIX_HIGH 0x144
+#define SIFIVE_L2_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_L2_DATECCFAIL_LOW 0x160
+#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
+#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_L2_CONFIG 0x00
+#define SIFIVE_L2_WAYENABLE 0x08
+#define SIFIVE_L2_ECCINJECTERR 0x40
+
+#define SIFIVE_L2_MAX_ECCINTR 3
+
+static void __iomem *l2_base;
+static int g_irq[SIFIVE_L2_MAX_ECCINTR];
+
+enum {
+        DIR_CORR = 0,
+        DATA_CORR,
+        DATA_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t l2_write(struct file *file, const char __user *data,
+                        size_t count, loff_t *ppos)
+{
+        unsigned int val;
+
+        if (kstrtouint_from_user(data, count, 0, &val))
+                return -EINVAL;
+        if ((val >= 0 && val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+                writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
+        else
+                return -EINVAL;
+        return count;
+}
+
+static const struct file_operations l2_fops = {
+        .owner = THIS_MODULE,
+        .open = simple_open,
+        .write = l2_write
+};
+
+static void setup_sifive_debug(void)
+{
+        sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
+
+        debugfs_create_file("sifive_debug_inject_error", 0200,
+                            sifive_test, NULL, &l2_fops);
+}
+#endif
+
+static void l2_config_read(void)
+{
+        u32 regval, val;
+
+        regval = readl(l2_base + SIFIVE_L2_CONFIG);
+        val = regval & 0xFF;
+        pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
+        val = (regval & 0xFF00) >> 8;
+        pr_info("L2CACHE: No. of ways per bank: %d\n", val);
+        val = (regval & 0xFF0000) >> 16;
+        pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
+        val = (regval & 0xFF000000) >> 24;
+        pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
+
+        regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
+        pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
+}
+
+static const struct of_device_id sifive_l2_ids[] = {
+        { .compatible = "sifive,fu540-c000-ccache" },
+        { /* end of table */ },
+};
+
+static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
+
+int register_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+        return atomic_notifier_chain_register(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
+
+int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+        return atomic_notifier_chain_unregister(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
+
+static irqreturn_t l2_int_handler(int irq, void *device)
+{
+        unsigned int regval, add_h, add_l;
+
+        if (irq == g_irq[DIR_CORR]) {
+                add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
+                add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
+                pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
+                regval = readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
+                atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+                                           "DirECCFix");
+        }
+        if (irq == g_irq[DATA_CORR]) {
+                add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
+                add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
+                pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
+                regval = readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
+                atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+                                           "DatECCFix");
+        }
+        if (irq == g_irq[DATA_UNCORR]) {
+                add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
+                add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
+                pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
+                regval = readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
+                atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
+                                           "DatECCFail");
+        }
+        return IRQ_HANDLED;
+}
+
+int __init sifive_l2_init(void)
+{
+        struct device_node *np;
+        struct resource res;
+        int i, rc;
+
+        np = of_find_matching_node(NULL, sifive_l2_ids);
+        if (!np)
+                return -ENODEV;
+
+        if (of_address_to_resource(np, 0, &res))
+                return -ENODEV;
+
+        l2_base = ioremap(res.start, resource_size(&res));
+        if (!l2_base)
+                return -ENOMEM;
+
+        for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) {
+                g_irq[i] = irq_of_parse_and_map(np, i);
+                rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
+                if (rc) {
+                        pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
+                        return rc;
+                }
+        }
+
+        l2_config_read();
+
+#ifdef CONFIG_DEBUG_FS
+        setup_sifive_debug();
+#endif
+        return 0;
+}
+device_initcall(sifive_l2_init);
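
For reference, a consumer of the error notifier exported above (for example,
the EDAC driver this merge un-blocks) might hook in roughly as follows. This
is a hypothetical sketch: ecc_err_event and l2_err_nb are illustrative names,
not part of this merge.

    static int ecc_err_event(struct notifier_block *this,
                             unsigned long event, void *msg)
    {
            /* event is SIFIVE_L2_ERR_TYPE_CE or SIFIVE_L2_ERR_TYPE_UE and
             * msg is the string passed to atomic_notifier_call_chain(). */
            pr_info("L2 ECC event %lu: %s\n", event, (char *)msg);
            return NOTIFY_OK;
    }

    static struct notifier_block l2_err_nb = {
            .notifier_call = ecc_err_event,
    };

    /* in the consumer's init path */
    register_sifive_l2_error_notifier(&l2_err_nb);

For testing, errors can be injected by writing to the debugfs file created
above, e.g. echo 0x1 > /sys/kernel/debug/sifive_l2_cache/sifive_debug_inject_error
(the accepted value ranges are bounds-checked in l2_write()).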