Merge tag 'for-linus' of git://github.com/openrisc/linux

Pull OpenRISC updates from Stafford Horne:
 "The OpenRISC work is a bit more interesting this time, adding SMP
  support and a few general cleanups.

  Small Things:

   - Move OpenRISC docs into Documentation and clean them up

   - Document previously undocumented devicetree bindings

   - Update the or1ksim dts to use stdout-path

  OpenRISC SMP support details:

   - First, the "use shadow registers" and "define CPU_BIG_ENDIAN as
     true" patches get the architecture ready for SMP.

   - The "add 1 and 2 byte cmpxchg support" and "use qspinlocks and
     qrwlocks" add the SMP locking infrastructure as needed. Using the
     qspinlocks and qrwlocks as suggested by Peter Z while reviewing the
     original spinlocks implementation.

   - The "support for ompic" adds a new irqchip device which is used for
     IPI communication to support SMP.

   - The "initial SMP support" adds smp.c and makes changes to all of
     the necessary data-structures to be per-cpu.

  The remaining patches are bug fixes and debug helpers which I wanted
  to keep separate from the "initial SMP support" patch so they can be
  reviewed on their own. These include:

   - add cacheflush support to fix icache aliasing

   - fix initial preempt state for secondary cpu tasks

   - sleep instead of spin on secondary wait

   - support framepointers and STACKTRACE_SUPPORT

   - enable LOCKDEP_SUPPORT and irqflags tracing

   - add tick timer multi-core sync logic

   - fix possible deadlock in timer sync, pointed out by mips guys

  Note: the irqchip patch was reviewed with Marc and we agreed to push
  it together with these patches"

* tag 'for-linus' of git://github.com/openrisc/linux:
  openrisc: fix possible deadlock scenario during timer sync
  openrisc: pass endianness info to sparse
  openrisc: add tick timer multi-core sync logic
  openrisc: enable LOCKDEP_SUPPORT and irqflags tracing
  openrisc: support framepointers and STACKTRACE_SUPPORT
  openrisc: add simple_smp dts and defconfig for simulators
  openrisc: add cacheflush support to fix icache aliasing
  openrisc: sleep instead of spin on secondary wait
  openrisc: fix initial preempt state for secondary cpu tasks
  openrisc: initial SMP support
  irqchip: add initial support for ompic
  dt-bindings: add openrisc to vendor prefixes list
  openrisc: use qspinlocks and qrwlocks
  openrisc: add 1 and 2 byte cmpxchg support
  openrisc: use shadow registers to save regs on exception
  dt-bindings: openrisc: Add OpenRISC platform SoC
  Documentation: openrisc: Updates to README
  Documentation: Move OpenRISC docs out of arch/
  MAINTAINERS: Add OpenRISC pic maintainer
  openrisc: dts: or1ksim: Add stdout-path
Linus Torvalds committed 2017-11-13 12:12:00 -08:00
46 changed files with 1944 additions and 266 deletions

@@ -1,7 +1,6 @@
generic-y += barrier.h
generic-y += bug.h
generic-y += bugs.h
-generic-y += cacheflush.h
generic-y += checksum.h
generic-y += clkdev.h
generic-y += current.h
@@ -28,6 +27,10 @@ generic-y += module.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += qspinlock_types.h
generic-y += qspinlock.h
generic-y += qrwlock_types.h
generic-y += qrwlock.h
generic-y += sections.h
generic-y += segment.h
generic-y += string.h

@@ -0,0 +1,96 @@
/*
* OpenRISC Linux
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* OpenRISC implementation:
* Copyright (C) Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de>
* et al.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H
#include <linux/mm.h>
/*
* Helper function for flushing or invalidating entire pages from data
* and instruction caches. SMP needs a little extra work, since we need
* to flush the pages on all cpus.
*/
extern void local_dcache_page_flush(struct page *page);
extern void local_icache_page_inv(struct page *page);
/*
* Data cache flushing always happen on the local cpu. Instruction cache
* invalidations need to be broadcasted to all other cpu in the system in
* case of SMP configurations.
*/
#ifndef CONFIG_SMP
#define dcache_page_flush(page) local_dcache_page_flush(page)
#define icache_page_inv(page) local_icache_page_inv(page)
#else /* CONFIG_SMP */
#define dcache_page_flush(page) local_dcache_page_flush(page)
#define icache_page_inv(page) smp_icache_page_inv(page)
extern void smp_icache_page_inv(struct page *page);
#endif /* CONFIG_SMP */
/*
* Synchronizes caches. Whenever a cpu writes executable code to memory, this
* should be called to make sure the processor sees the newly written code.
*/
static inline void sync_icache_dcache(struct page *page)
{
	if (!IS_ENABLED(CONFIG_DCACHE_WRITETHROUGH))
		dcache_page_flush(page);
	icache_page_inv(page);
}
/*
* Pages with this bit set need not be flushed/invalidated, since
* they have not changed since last flush. New pages start with
* PG_arch_1 not set and are therefore dirty by default.
*/
#define PG_dc_clean PG_arch_1
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	clear_bit(PG_dc_clean, &page->flags);
}
/*
* Other interfaces are not required since we do not have virtually
* indexed or tagged caches. So we can use the default here.
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		if (vma->vm_flags & VM_EXEC) \
			sync_icache_dcache(page); \
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#endif /* __ASM_CACHEFLUSH_H */
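
The SMP variant above only declares smp_icache_page_inv(); its implementation lives in the SMP code added elsewhere in this series and is not shown here. As a rough, hedged sketch of the idea (not the actual arch/openrisc code), an icache invalidation can be broadcast by wrapping the local helper in the generic on_each_cpu() call:

/* Hedged sketch only, not the implementation from this series: run the
 * local icache invalidation on every online cpu and wait for completion.
 */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}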

@@ -1,32 +1,29 @@
/*
* 1,2 and 4 byte cmpxchg and xchg implementations for OpenRISC.
*
* Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
* Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
* Note:
* The portable implementations of 1 and 2 byte xchg and cmpxchg using a 4
* byte cmpxchg is sourced heavily from the sh and mips implementations.
*/
#ifndef __ASM_OPENRISC_CMPXCHG_H
#define __ASM_OPENRISC_CMPXCHG_H
#include <linux/types.h>
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern void __cmpxchg_called_with_bad_pointer(void);
#include <linux/bitops.h>
#define __HAVE_ARCH_CMPXCHG 1
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+static inline unsigned long cmpxchg_u32(volatile void *ptr,
+		unsigned long old, unsigned long new)
{
-	if (size != 4) {
-		__cmpxchg_called_with_bad_pointer();
-		return old;
-	}
__asm__ __volatile__(
"1: l.lwa %0, 0(%1) \n"
" l.sfeq %0, %2 \n"
@@ -43,28 +40,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old;
}
-#define cmpxchg(ptr, o, n) \
-({ \
-	(__typeof__(*(ptr))) __cmpxchg((ptr), \
-				       (unsigned long)(o), \
-				       (unsigned long)(n), \
-				       sizeof(*(ptr))); \
-})
-/*
- * This function doesn't exist, so you'll get a linker error if
- * something tries to do an invalidly-sized xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
-				   int size)
+static inline unsigned long xchg_u32(volatile void *ptr,
+		unsigned long val)
{
-	if (size != 4) {
-		__xchg_called_with_bad_pointer();
-		return val;
-	}
__asm__ __volatile__(
"1: l.lwa %0, 0(%1) \n"
" l.swa 0(%1), %2 \n"
@@ -77,10 +55,115 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
return val;
}
static inline u32 cmpxchg_small(volatile void *ptr, u32 old, u32 new,
				int size)
{
	int off = (unsigned long)ptr % sizeof(u32);
	volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
	int bitoff = off * BITS_PER_BYTE;
#endif
	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
	u32 load32, old32, new32;
	u32 ret;

	load32 = READ_ONCE(*p);

	while (true) {
		ret = (load32 & bitmask) >> bitoff;
		if (old != ret)
			return ret;

		old32 = (load32 & ~bitmask) | (old << bitoff);
		new32 = (load32 & ~bitmask) | (new << bitoff);

		/* Do 32 bit cmpxchg */
		load32 = cmpxchg_u32(p, old32, new32);
		if (load32 == old32)
			return old;
	}
}
/* xchg */

static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
{
	int off = (unsigned long)ptr % sizeof(u32);
	volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
	int bitoff = off * BITS_PER_BYTE;
#endif
	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
	u32 oldv, newv;
	u32 ret;

	do {
		oldv = READ_ONCE(*p);
		ret = (oldv & bitmask) >> bitoff;
		newv = (oldv & ~bitmask) | (x << bitoff);
	} while (cmpxchg_u32(p, oldv, newv) != oldv);

	return ret;
}
/*
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg().
*/
extern unsigned long __cmpxchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for cmpxchg");
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
	case 2:
		return cmpxchg_small(ptr, old, new, size);
	case 4:
		return cmpxchg_u32(ptr, old, new);
	default:
		return __cmpxchg_called_with_bad_pointer();
	}
}

#define cmpxchg(ptr, o, n) \
({ \
	(__typeof__(*(ptr))) __cmpxchg((ptr), \
				       (unsigned long)(o), \
				       (unsigned long)(n), \
				       sizeof(*(ptr))); \
})
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalidly-sized xchg().
*/
extern unsigned long __xchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for xchg");
static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
				   int size)
{
	switch (size) {
	case 1:
	case 2:
		return xchg_small(ptr, with, size);
	case 4:
		return xchg_u32(ptr, with);
	default:
		return __xchg_called_with_bad_pointer();
	}
}

#define xchg(ptr, with) \
({ \
-	(__typeof__(*(ptr))) __xchg((unsigned long)(with), \
-				    (ptr), \
+	(__typeof__(*(ptr))) __xchg((ptr), \
+				    (unsigned long)(with), \
				    sizeof(*(ptr))); \
})
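
To illustrate what the new small-size paths buy (this example is not part of the patch): cmpxchg() can now be used on 1- and 2-byte fields, with __cmpxchg() dispatching to cmpxchg_small(), which emulates the access through a masked 32-bit cmpxchg_u32() loop on the containing word.

/* Illustrative usage only; the structure and function are made up. */
struct pkt_state {
	u8  owner;	/* sub-word cmpxchg() now works on this */
	u16 seq;
};

static bool claim_packet(struct pkt_state *s, u8 me)
{
	/* Emulated with a 32-bit l.lwa/l.swa loop on the word that
	 * contains 'owner'; the neighbouring 'seq' field is preserved. */
	return cmpxchg(&s->owner, 0, me) == 0;
}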

@@ -19,7 +19,7 @@
#ifndef __ASM_OPENRISC_CPUINFO_H
#define __ASM_OPENRISC_CPUINFO_H
-struct cpuinfo {
+struct cpuinfo_or1k {
u32 clock_frequency;
u32 icache_size;
@@ -29,8 +29,11 @@ struct cpuinfo {
u32 dcache_size;
u32 dcache_block_size;
u32 dcache_ways;
+	u16 coreid;
};
-extern struct cpuinfo cpuinfo;
+extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
extern void setup_cpuinfo(void);
#endif /* __ASM_OPENRISC_CPUINFO_H */

@@ -34,7 +34,7 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* registers like cr3 on the i386
*/
-extern volatile pgd_t *current_pgd; /* defined in arch/openrisc/mm/fault.c */
+extern volatile pgd_t *current_pgd[]; /* defined in arch/openrisc/mm/fault.c */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{

@@ -94,7 +94,7 @@ extern void paging_init(void);
* 64 MB of vmalloc area is comparable to what's available on other arches.
*/
-#define VMALLOC_START (PAGE_OFFSET-0x04000000)
+#define VMALLOC_START (PAGE_OFFSET-0x04000000UL)
#define VMALLOC_END (PAGE_OFFSET)
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
@@ -416,15 +416,19 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
struct vm_area_struct;
/*
* or32 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
*
* Actually I am not sure on what this could be used for.
*/
static inline void update_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t *pte)
{
}
extern void update_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *pte);
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *pte)
{
update_tlb(vma, address, pte);
update_cache(vma, address, pte);
}
/* __PHX__ FIXME, SWAP, this probably doesn't work */

@@ -29,7 +29,7 @@
* it needs to be correct to get the early console working.
*/
-#define BASE_BAUD (cpuinfo.clock_frequency/16)
+#define BASE_BAUD (cpuinfo_or1k[smp_processor_id()].clock_frequency/16)
#endif /* __KERNEL__ */

@@ -0,0 +1,26 @@
/*
* Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef __ASM_OPENRISC_SMP_H
#define __ASM_OPENRISC_SMP_H
#include <asm/spr.h>
#include <asm/spr_defs.h>
#define raw_smp_processor_id() (current_thread_info()->cpu)
#define hard_smp_processor_id() mfspr(SPR_COREID)
extern void smp_init_cpus(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
extern void handle_IPI(unsigned int ipi_msg);
#endif /* __ASM_OPENRISC_SMP_H */
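
set_smp_cross_call() and handle_IPI() follow the cross-call pattern used by arm/arm64: the irqchip driver (here ompic, added later in the series) registers a function for raising IPIs, and the architecture SMP code sends and receives all IPIs through it. A hedged sketch of the expected wiring, with the IPI message numbers invented for illustration:

/* Sketch only, not the smp.c added by this series; IPI_RESCHEDULE and
 * IPI_CALL_FUNC are assumed message numbers. */
enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
};

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;		/* registered by the ompic driver */
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

void handle_IPI(unsigned int ipi_msg)
{
	switch (ipi_msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;
	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;
	}
}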

@@ -19,6 +19,16 @@
#ifndef __ASM_OPENRISC_SPINLOCK_H
#define __ASM_OPENRISC_SPINLOCK_H
#error "or32 doesn't do SMP yet"
#include <asm/qspinlock.h>
#include <asm/qrwlock.h>
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif

@@ -0,0 +1,7 @@
#ifndef _ASM_OPENRISC_SPINLOCK_TYPES_H
#define _ASM_OPENRISC_SPINLOCK_TYPES_H
#include <asm/qspinlock_types.h>
#include <asm/qrwlock_types.h>
#endif /* _ASM_OPENRISC_SPINLOCK_TYPES_H */

@@ -51,6 +51,11 @@
#define SPR_ICCFGR (SPRGROUP_SYS + 6)
#define SPR_DCFGR (SPRGROUP_SYS + 7)
#define SPR_PCCFGR (SPRGROUP_SYS + 8)
#define SPR_VR2 (SPRGROUP_SYS + 9)
#define SPR_AVR (SPRGROUP_SYS + 10)
#define SPR_EVBAR (SPRGROUP_SYS + 11)
#define SPR_AECR (SPRGROUP_SYS + 12)
#define SPR_AESR (SPRGROUP_SYS + 13)
#define SPR_NPC (SPRGROUP_SYS + 16) /* CZ 21/06/01 */
#define SPR_SR (SPRGROUP_SYS + 17) /* CZ 21/06/01 */
#define SPR_PPC (SPRGROUP_SYS + 18) /* CZ 21/06/01 */
@@ -61,6 +66,8 @@
#define SPR_EEAR_LAST (SPRGROUP_SYS + 63)
#define SPR_ESR_BASE (SPRGROUP_SYS + 64)
#define SPR_ESR_LAST (SPRGROUP_SYS + 79)
#define SPR_COREID (SPRGROUP_SYS + 128)
#define SPR_NUMCORES (SPRGROUP_SYS + 129)
#define SPR_GPR_BASE (SPRGROUP_SYS + 1024)
/* Data MMU group */
@@ -135,11 +142,18 @@
#define SPR_VR_CFG 0x00ff0000 /* Processor configuration */
#define SPR_VR_RES 0x0000ffc0 /* Reserved */
#define SPR_VR_REV 0x0000003f /* Processor revision */
#define SPR_VR_UVRP 0x00000040 /* Updated Version Registers Present */
#define SPR_VR_VER_OFF 24
#define SPR_VR_CFG_OFF 16
#define SPR_VR_REV_OFF 0
/*
* Bit definitions for the Version Register 2
*/
#define SPR_VR2_CPUID 0xff000000 /* Processor ID */
#define SPR_VR2_VER 0x00ffffff /* Processor version */
/*
* Bit definitions for the Unit Present Register
*

@@ -74,7 +74,7 @@ struct thread_info {
.task = &tsk, \
.flags = 0, \
.cpu = 0, \
-	.preempt_count = 1, \
+	.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
.ksp = 0, \
}

@@ -0,0 +1,23 @@
/*
* OpenRISC timer API
*
* Copyright (C) 2017 by Stafford Horne (shorne@gmail.com)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_OR1K_TIME_H
#define __ASM_OR1K_TIME_H
extern void openrisc_clockevent_init(void);
extern void openrisc_timer_set(unsigned long count);
extern void openrisc_timer_set_next(unsigned long delta);
#ifdef CONFIG_SMP
extern void synchronise_count_master(int cpu);
extern void synchronise_count_slave(int cpu);
#endif
#endif /* __ASM_OR1K_TIME_H */
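
synchronise_count_master() and synchronise_count_slave() are only declared here; the sync logic itself, modeled on the MIPS count synchronisation according to the merge message, is added elsewhere in the series. Conceptually the two CPUs rendezvous on a shared flag and the secondary then adopts the boot CPU's tick counter. A heavily simplified, single-round sketch of that idea, with the flag and variable names invented:

/* Conceptual sketch only; the real logic runs several rounds and is far
 * more careful about ordering. SPR_TTCR is the tick timer count register. */
static atomic_t count_go = ATOMIC_INIT(0);
static unsigned long master_count;

void synchronise_count_master(int cpu)
{
	master_count = mfspr(SPR_TTCR);	/* sample the boot cpu's counter */
	smp_wmb();
	atomic_set(&count_go, 1);	/* release the waiting secondary */
}

void synchronise_count_slave(int cpu)
{
	while (!atomic_read(&count_go))
		cpu_relax();
	smp_rmb();
	openrisc_timer_set(master_count);	/* declared in this header */
}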

@@ -33,13 +33,26 @@
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(mm, start, end) flushes a range of pages
*/
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr);
extern void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
-void flush_tlb_all(void);
-void flush_tlb_mm(struct mm_struct *mm);
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-void flush_tlb_range(struct vm_area_struct *vma,
-		     unsigned long start,
-		     unsigned long end);
#ifndef CONFIG_SMP
#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
#endif
static inline void flush_tlb(void)
{

@@ -0,0 +1,20 @@
/*
* OpenRISC unwinder.h
*
* Architecture API for unwinding stacks.
*
* Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef __ASM_OPENRISC_UNWINDER_H
#define __ASM_OPENRISC_UNWINDER_H
void unwind_stack(void *data, unsigned long *stack,
void (*trace)(void *data, unsigned long addr,
int reliable));
#endif /* __ASM_OPENRISC_UNWINDER_H */
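
unwind_stack() is the callback-driven core that the new framepointer and STACKTRACE_SUPPORT code builds on: the caller supplies a function that is invoked once per frame. A sketch of how a stack-trace consumer would typically plug into it (the callback and its use of struct stack_trace are illustrative, not taken from the series):

/* Hedged sketch: collect the addresses reported by unwind_stack() into the
 * generic struct stack_trace container used by kernels of this era. */
static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = data;

	if (trace->skip > 0) {
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	/* r1 is the OpenRISC stack pointer; copy it with l.ori rD,rA,0 */
	__asm__ __volatile__("l.ori %0, r1, 0" : "=r" (sp));

	unwind_stack(trace, (unsigned long *)sp, save_stack_address);
}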