Merge tag 'riscv/for-v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Paul Walmsley:
 "Add the following new features:

   - Generic CPU topology description support for DT-based platforms,
     including ARM64, ARM and RISC-V.

   - Sparsemem support

   - Perf callchain support

   - SiFive PLIC irqchip modifications, in preparation for M-mode Linux

  and clean up the code base:

   - Clean up control and status register (CSR) manipulation code, IPIs,
     TLB flushing, and the RISC-V CPU-local timer code

   - Kbuild cleanup from one of the Kbuild maintainers"

[ The CPU topology parts came in through the arm64 tree with a shared
  branch   - Linus ]

* tag 'riscv/for-v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  irqchip/sifive-plic: set max threshold for ignored handlers
  riscv: move the TLB flush logic out of line
  riscv: don't use the rdtime(h) pseudo-instructions
  riscv: cleanup riscv_cpuid_to_hartid_mask
  riscv: optimize send_ipi_single
  riscv: cleanup send_ipi_mask
  riscv: refactor the IPI code
  riscv: Add support for libdw
  riscv: Add support for perf registers sampling
  riscv: Add perf callchain support
  riscv: add arch/riscv/Kbuild
  RISC-V: Implement sparsemem
  riscv: Using CSR numbers to access CSRs

Linus Torvalds, 2019-09-16 15:29:34 -07:00
34 changed files with 662 additions and 126 deletions

--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h

@@ -110,8 +110,10 @@ extern unsigned long min_low_pfn;
 #define page_to_bus(page)	(page_to_phys(page))
 #define phys_to_page(paddr)	(pfn_to_page(phys_to_pfn(paddr)))
 
+#ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn) \
 	(((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))
+#endif
 
 #define ARCH_PFN_OFFSET		(pfn_base)
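
The new CONFIG_FLATMEM guard is needed because this pfn_valid() is a plain
bounds check against one contiguous range; with sparsemem enabled, the
generic pfn_valid() from linux/mmzone.h, which resolves the pfn through the
section table, takes over. A minimal sketch of the kind of caller this
protects (count_valid_pfns() is a hypothetical helper, not part of the
patch):

#include <linux/mm.h>
#include <linux/mmzone.h>

/* Count backed pages in a pfn range.  Under CONFIG_FLATMEM, pfn_valid()
 * is the bounds check above; under CONFIG_SPARSEMEM it is the generic
 * section-table lookup. */
static unsigned long count_valid_pfns(unsigned long start, unsigned long end)
{
	unsigned long pfn, count = 0;

	for (pfn = start; pfn < end; pfn++)
		if (pfn_valid(pfn))
			count++;	/* pfn_to_page(pfn) is safe here */
	return count;
}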

--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h

@@ -83,6 +83,19 @@ extern pgd_t swapper_pg_dir[];
 #define __S110	PAGE_SHARED_EXEC
 #define __S111	PAGE_SHARED_EXEC
 
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VMEMMAP_SHIFT \
+	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END	(VMALLOC_START - 1)
+#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
+
+#define vmemmap		((struct page *)VMEMMAP_START)
+
 /*
  * ZERO_PAGE is a global shared page that is always zero,
  * used for zero-mapped memory areas, etc.
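
To make the sizing concrete, a worked example, assuming an Sv39 kernel with
CONFIG_VA_BITS=39, PAGE_SHIFT=12, and a 64-byte struct page
(STRUCT_PAGE_MAX_SHIFT=6); other configurations scale the same way:

/*
 * VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32
 * VMEMMAP_SIZE  = 1UL << 32       = 4 GiB
 *
 * Cross-check: half the virtual address space is 2^38 bytes, i.e.
 * 2^26 four-KiB pages; at 64 bytes per struct page the array needs
 * 2^26 * 2^6 = 2^32 bytes = 4 GiB.  The window sits immediately
 * below VMALLOC_START, so &vmemmap[pfn] always lands inside
 * [VMEMMAP_START, VMEMMAP_END].
 */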

--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h

@@ -61,11 +61,5 @@ static inline unsigned long cpuid_to_hartid_map(int cpu)
 	return boot_cpu_hartid;
 }
 
-static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
-					      struct cpumask *out)
-{
-	cpumask_set_cpu(cpuid_to_hartid_map(0), out);
-}
-
 #endif /* CONFIG_SMP */
 #endif /* _ASM_RISCV_SMP_H */
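
With the !CONFIG_SMP stub gone, both configurations can share one
out-of-line implementation that also performs the cpumask_clear() callers
previously had to do themselves. A sketch of what that consolidated
version plausibly looks like (the real body lives in a C file not shown
in this diff):

void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
	int cpu;

	/* Clear here so callers no longer need their own cpumask_clear(). */
	cpumask_clear(out);
	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}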

--- /dev/null
+++ b/arch/riscv/include/asm/sparsemem.h

@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_SPARSEMEM_H
+#define __ASM_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+#define MAX_PHYSMEM_BITS	CONFIG_PA_BITS
+#define SECTION_SIZE_BITS	27
+#endif /* CONFIG_SPARSEMEM */
+
+#endif /* __ASM_SPARSEMEM_H */
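
These two constants fix the sparsemem geometry: SECTION_SIZE_BITS of 27
makes every memory section 128 MiB, and MAX_PHYSMEM_BITS bounds the
section count by the physical address width. A quick check of the
arithmetic (PA_BITS values assumed from the usual Sv32/Sv39 limits):

/*
 * section size        = 1 << 27 bytes            = 128 MiB
 * rv64, PA_BITS = 56:   1 << (56 - 27) sections  = 2^29 max
 * rv32, PA_BITS = 34:   1 << (34 - 27) sections  = 128 max
 *
 * Only present sections get mem_section entries and struct pages,
 * so physical memory may start high or contain holes without the
 * waste a single flat mem_map would incur.
 */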

--- a/arch/riscv/include/asm/timex.h
+++ b/arch/riscv/include/asm/timex.h

@@ -6,43 +6,41 @@
 #ifndef _ASM_RISCV_TIMEX_H
 #define _ASM_RISCV_TIMEX_H
 
-#include <asm/param.h>
+#include <asm/csr.h>
 
 typedef unsigned long cycles_t;
 
-static inline cycles_t get_cycles_inline(void)
+static inline cycles_t get_cycles(void)
 {
-	cycles_t n;
-
-	__asm__ __volatile__ (
-		"rdtime %0"
-		: "=r" (n));
-	return n;
+	return csr_read(CSR_TIME);
 }
-#define get_cycles get_cycles_inline
+#define get_cycles get_cycles
 
 #ifdef CONFIG_64BIT
-static inline uint64_t get_cycles64(void)
+static inline u64 get_cycles64(void)
 {
-	return get_cycles();
+	return get_cycles();
 }
-#else
-static inline uint64_t get_cycles64(void)
+#else /* CONFIG_64BIT */
+static inline u32 get_cycles_hi(void)
 {
-	u32 lo, hi, tmp;
-
-	__asm__ __volatile__ (
-		"1:\n"
-		"rdtimeh %0\n"
-		"rdtime %1\n"
-		"rdtimeh %2\n"
-		"bne %0, %2, 1b"
-		: "=&r" (hi), "=&r" (lo), "=&r" (tmp));
+	return csr_read(CSR_TIMEH);
+}
+
+static inline u64 get_cycles64(void)
+{
+	u32 hi, lo;
+
+	do {
+		hi = get_cycles_hi();
+		lo = get_cycles();
+	} while (hi != get_cycles_hi());
+
 	return ((u64)hi << 32) | lo;
 }
-#endif
+#endif /* CONFIG_64BIT */
 
 #define ARCH_HAS_READ_CURRENT_TIMER
 static inline int read_current_timer(unsigned long *timer_val)
 {
 	*timer_val = get_cycles();
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h

@@ -25,8 +25,13 @@ static inline void local_flush_tlb_page(unsigned long addr)
 	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
 }
 
-#ifndef CONFIG_SMP
+#ifdef CONFIG_SMP
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end);
+#else /* CONFIG_SMP */
 #define flush_tlb_all() local_flush_tlb_all()
 #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
@@ -37,35 +42,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 }
 
 #define flush_tlb_mm(mm) flush_tlb_all()
-#else /* CONFIG_SMP */
-#include <asm/sbi.h>
-
-static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
-				     unsigned long size)
-{
-	struct cpumask hmask;
-
-	cpumask_clear(&hmask);
-	riscv_cpuid_to_hartid_mask(cmask, &hmask);
-	sbi_remote_sfence_vma(hmask.bits, start, size);
-}
-
-#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-#define flush_tlb_range(vma, start, end) \
-	remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
-}
-
-#define flush_tlb_mm(mm) \
-	remote_sfence_vma(mm_cpumask(mm), 0, -1)
 #endif /* CONFIG_SMP */
 
 /* Flush a range of kernel pages */
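
With the "riscv: move the TLB flush logic out of line" commit, the SMP
bodies become real functions in a C file that this diff does not show.
Based on the inline code removed above, that file plausibly looks like the
sketch below (the helper name is carried over from the deleted inline
version; the actual file layout may differ):

#include <linux/mm.h>
#include <linux/smp.h>
#include <asm/sbi.h>

static void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
			      unsigned long size)
{
	struct cpumask hmask;

	/* riscv_cpuid_to_hartid_mask() clears hmask first, per the
	 * smp.h cleanup earlier in this series. */
	riscv_cpuid_to_hartid_mask(cmask, &hmask);
	sbi_remote_sfence_vma(hmask.bits, start, size);
}

void flush_tlb_all(void)
{
	sbi_remote_sfence_vma(NULL, 0, -1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	remote_sfence_vma(mm_cpumask(mm), 0, -1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	remote_sfence_vma(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	remote_sfence_vma(mm_cpumask(vma->vm_mm), start, end - start);
}

Moving these out of line keeps asm/sbi.h out of every tlbflush.h user and
shrinks each call site to a single function call.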