Merge branch 'parisc-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
 "Many great new features, fixes and optimizations, including:

   - Convert page table updates to use per-pagetable spinlocks which
     overall improves performance on SMP machines a lot, by Mikulas
     Patocka

   - Kernel debugger (KGDB) support, by Sven Schnelle

   - KPROBES support, by Sven Schnelle

   - Lots of TLB lock/flush improvements, by Dave Anglin

   - Drop DISCONTIGMEM and switch to SPARSEMEM

   - Added JUMP_LABEL, branch runtime-patching support

   - Lots of other small speedups and cleanups, e.g. for QEMU, stack
     randomization, avoidance of name clashes, documentation updates,
     etc ..."

* 'parisc-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: (28 commits)
  parisc: Add static branch and JUMP_LABEL feature
  parisc: Use PA_ASM_LEVEL in boot code
  parisc: Rename LEVEL to PA_ASM_LEVEL to avoid name clash with DRBD code
  parisc: Update huge TLB page support to use per-pagetable spinlock
  parisc: Use per-pagetable spinlock
  parisc: Allow live-patching of __meminit functions
  parisc: Add memory barrier to asm pdc and sync instructions
  parisc: Add memory clobber to TLB purges
  parisc: Use ldcw instruction for SMP spinlock release barrier
  parisc: Remove lock code to serialize TLB operations in pacache.S
  parisc: Switch from DISCONTIGMEM to SPARSEMEM
  parisc: enable wide mode early
  parisc: update feature lists
  parisc: Show n/a if product number not available
  parisc: remove unused flags parameter in __patch_text()
  doc: update kprobes supported architecture list
  parisc: Implement kretprobes
  parisc: remove kprobes.h from generic-y
  parisc: Implement kprobes
  parisc: add functions required by KPROBE_EVENTS
  ...
Author: Linus Torvalds
Date:   2019-05-07 19:34:17 -07:00

50 changed files with 1258 additions and 295 deletions

@@ -10,7 +10,6 @@ generic-y += hw_irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += kdebug.h
-generic-y += kprobes.h
 generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
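(kprobes.h can only leave the generic-y list because this series adds a real arch/parisc/include/asm/kprobes.h below; if both were present, Kbuild would keep generating an asm-generic wrapper header that shadows the new arch header.)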

@@ -61,14 +61,14 @@
 #define LDCW		ldcw,co
 #define BL		b,l
 # ifdef CONFIG_64BIT
-#  define LEVEL		2.0w
+#  define PA_ASM_LEVEL	2.0w
 # else
-#  define LEVEL		2.0
+#  define PA_ASM_LEVEL	2.0
 # endif
 #else
 #define LDCW		ldcw
 #define BL		bl
-#define LEVEL		1.1
+#define PA_ASM_LEVEL	1.1
 #endif

 #ifdef __ASSEMBLY__

@@ -44,22 +44,22 @@ void parisc_setup_cache_timing(void);
 #define pdtlb(addr)	asm volatile("pdtlb 0(%%sr1,%0)" \
 			ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
-			: : "r" (addr))
+			: : "r" (addr) : "memory")
 #define pitlb(addr)	asm volatile("pitlb 0(%%sr1,%0)" \
 			ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
 			ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \
-			: : "r" (addr))
+			: : "r" (addr) : "memory")
 #define pdtlb_kernel(addr)	asm volatile("pdtlb 0(%0)" \
 			ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
-			: : "r" (addr))
+			: : "r" (addr) : "memory")

 #define asm_io_fdc(addr)	asm volatile("fdc %%r0(%0)" \
 			ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
 			ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \
-			: : "r" (addr))
+			: : "r" (addr) : "memory")

 #define asm_io_sync()	asm volatile("sync" \
 			ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
-			ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: )
+			ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")

 #endif /* ! __ASSEMBLY__ */
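The "memory" clobbers added above act as compiler barriers: without them GCC is free to keep previously loaded values in registers or to reorder unrelated loads and stores across the asm statement, so a TLB purge or cache-flush instruction could be moved relative to the very accesses it is meant to order. The clobber forces the compiler to treat each of these instructions as touching all of memory.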

@@ -15,17 +15,34 @@
  * from areas congruently mapped with user space. It is 8MB large
  * and must be 16MB aligned */
 #define TMPALIAS_MAP_START	((__PAGE_OFFSET) - 16*1024*1024)

+#define FIXMAP_SIZE		(FIX_BITMAP_COUNT << PAGE_SHIFT)
+#define FIXMAP_START		(TMPALIAS_MAP_START - FIXMAP_SIZE)
 /* This is the kernel area for all maps (vmalloc, dma etc.) most
  * usually, it extends up to TMPALIAS_MAP_START. Virtual addresses
  * 0..GATEWAY_PAGE_SIZE are reserved for the gateway page */
 #define KERNEL_MAP_START	(GATEWAY_PAGE_SIZE)
-#define KERNEL_MAP_END		(TMPALIAS_MAP_START)
+#define KERNEL_MAP_END		(FIXMAP_START)

+#ifndef __ASSEMBLY__
+
+enum fixed_addresses {
+	/* Support writing RO kernel text via kprobes, jump labels, etc. */
+	FIX_TEXT_POKE0,
+	FIX_BITMAP_COUNT
+};
+
 extern void *parisc_vmalloc_start;
 #define PCXL_DMA_MAP_SIZE	(8*1024*1024)
 #define VMALLOC_START		((unsigned long)parisc_vmalloc_start)
 #define VMALLOC_END		(KERNEL_MAP_END)
+
+#define __fix_to_virt(_x)	(FIXMAP_START + ((_x) << PAGE_SHIFT))
+
+void set_fixmap(enum fixed_addresses idx, phys_addr_t phys);
+void clear_fixmap(enum fixed_addresses idx);
+
+#endif /*__ASSEMBLY__*/

 #endif /*_ASM_FIXMAP_H*/
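FIX_TEXT_POKE0 is what lets the kernel write to its own read-only text: the patching code maps the target page at the fixmap slot with write permission and pokes through that alias. A minimal sketch of the idea follows; example_poke_word and its body are illustrative, not code from this series, and assume the usual __pa()/offset_in_page()/flush_icache_range() helpers.

#include <linux/mm.h>		/* offset_in_page() */
#include <linux/types.h>
#include <asm/cacheflush.h>	/* flush_icache_range() */
#include <asm/fixmap.h>
#include <asm/page.h>		/* __pa(), PAGE_MASK */

/* Hypothetical example: patch one instruction word in RO kernel text
 * by writing through a temporary writable alias at FIX_TEXT_POKE0. */
static void example_poke_word(u32 *ro_addr, u32 insn)
{
	u32 *alias;

	set_fixmap(FIX_TEXT_POKE0, __pa(ro_addr) & PAGE_MASK);
	alias = (u32 *)(__fix_to_virt(FIX_TEXT_POKE0) +
			offset_in_page(ro_addr));
	*alias = insn;				/* write via the RW alias */
	clear_fixmap(FIX_TEXT_POKE0);
	flush_icache_range((unsigned long)ro_addr,
			   (unsigned long)ro_addr + sizeof(insn));
}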

@@ -120,7 +120,7 @@ extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path);
 extern void init_parisc_bus(void);
 extern struct device *hwpath_to_device(struct hardware_path *modpath);
 extern void device_to_hwpath(struct device *dev, struct hardware_path *path);
+extern int machine_has_merced_bus(void);

 /* inventory.c: */
 extern void do_memory_inventory(void);
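machine_has_merced_bus() exposes a runtime check for the N-class Merced bus. The TLB-flush rework below (pa_serialize_tlb_flushes in pgtable.h) needs exactly this information, since only one PxTLB broadcast may be in flight on that bus at a time — presumably this is where the new flag gets its value.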

@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_JUMP_LABEL_H
+#define _ASM_PARISC_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/assembly.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 "nop\n\t"
+		 ".pushsection __jump_table, \"aw\"\n\t"
+		 ".word 1b - ., %l[l_yes] - .\n\t"
+		 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+		 ".popsection\n\t"
+		 : : "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 "b,n %l[l_yes]\n\t"
+		 ".pushsection __jump_table, \"aw\"\n\t"
+		 ".word 1b - ., %l[l_yes] - .\n\t"
+		 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+		 ".popsection\n\t"
+		 : : "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif
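These two hooks back the generic static-key API: the inline NOP (or b,n) they emit is rewritten at runtime when the key flips. For readers who have not used that API, a small illustrative sketch — the key name and functions are made up, not from this series:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(tracing_enabled);

static void do_trace(void) { /* hypothetical slow path */ }

static void fast_path(void)
{
	/* While the key is disabled this compiles down to the NOP from
	 * arch_static_branch() above: no load, no compare, no branch. */
	if (static_branch_unlikely(&tracing_enabled))
		do_trace();
}

static void enable_tracing(void)
{
	/* Rewrites the NOP into a taken branch via the arch patching code. */
	static_branch_enable(&tracing_enabled);
}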

@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PA-RISC KGDB support
+ *
+ * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ *
+ */
+
+#ifndef __PARISC_KGDB_H__
+#define __PARISC_KGDB_H__
+
+#define BREAK_INSTR_SIZE		4
+#define PARISC_KGDB_COMPILED_BREAK_INSN	0x3ffc01f
+#define PARISC_KGDB_BREAK_INSN		0x3ffa01f
+
+#define NUMREGBYTES			sizeof(struct parisc_gdb_regs)
+#define BUFMAX				4096
+
+#define CACHE_FLUSH_IS_SAFE		1
+
+#ifndef __ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+	asm(".word %0" : : "i"(PARISC_KGDB_COMPILED_BREAK_INSN) : "memory");
+}
+
+struct parisc_gdb_regs {
+	unsigned long gpr[32];
+	unsigned long sar;
+	unsigned long iaoq_f;
+	unsigned long iasq_f;
+	unsigned long iaoq_b;
+	unsigned long iasq_b;
+	unsigned long eiem;
+	unsigned long iir;
+	unsigned long isr;
+	unsigned long ior;
+	unsigned long ipsw;
+	unsigned long __unused0;
+	unsigned long sr4;
+	unsigned long sr0;
+	unsigned long sr1;
+	unsigned long sr2;
+	unsigned long sr3;
+	unsigned long sr5;
+	unsigned long sr6;
+	unsigned long sr7;
+	unsigned long cr0;
+	unsigned long pid1;
+	unsigned long pid2;
+	unsigned long scrccr;
+	unsigned long pid3;
+	unsigned long pid4;
+	unsigned long cr24;
+	unsigned long cr25;
+	unsigned long cr26;
+	unsigned long cr27;
+	unsigned long cr28;
+	unsigned long cr29;
+	unsigned long cr30;
+
+	u64 fr[32];
+};
+
+#endif
+#endif

@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/parisc/include/asm/kprobes.h
+ *
+ * PA-RISC kprobes implementation
+ *
+ * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ */
+
+#ifndef _PARISC_KPROBES_H
+#define _PARISC_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+#include <asm-generic/kprobes.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/notifier.h>
+
+#define PARISC_KPROBES_BREAK_INSN	0x3ff801f
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 1
+
+typedef u32 kprobe_opcode_t;
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+#define flush_insn_slot(p) \
+	flush_icache_range((unsigned long)&(p)->ainsn.insn[0], \
+			   (unsigned long)&(p)->ainsn.insn[0] + \
+			   sizeof(kprobe_opcode_t))
+
+#define kretprobe_blacklist_size    0
+
+struct arch_specific_insn {
+	kprobe_opcode_t *insn;
+};
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+};
+
+struct kprobe_ctlblk {
+	unsigned int kprobe_status;
+	struct prev_kprobe prev_kprobe;
+	unsigned long iaoq[2];
+};
+
+int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs);
+int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs);
+
+#endif /* CONFIG_KPROBES */
+#endif /* _PARISC_KPROBES_H */
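With this header plus the break/single-step handlers added elsewhere in the series, the generic kprobes API becomes usable on parisc. As a quick illustration, a standard kprobes module sketch — not code from the series, and the probed symbol is arbitrary:

#include <linux/kprobes.h>
#include <linux/module.h>

static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* iaoq[0] is the front of the parisc instruction-address queue */
	pr_info("pre-handler hit at iaoq %lx\n", regs->iaoq[0]);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_sys_open",
	.pre_handler	= pre_handler,
};

static int __init probe_init(void)
{
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");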

@@ -2,62 +2,6 @@
 #ifndef _PARISC_MMZONE_H
 #define _PARISC_MMZONE_H

-#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
+#define MAX_PHYSMEM_RANGES 4 /* Fix the size for now (current known max is 3) */

-#ifdef CONFIG_DISCONTIGMEM
-
-extern int npmem_ranges;
-
-struct node_map_data {
-	pg_data_t pg_data;
-};
-
-extern struct node_map_data node_data[];
-
-#define NODE_DATA(nid)		(&node_data[nid].pg_data)
-
-/* We have these possible memory map layouts:
- * Astro: 0-3.75, 67.75-68, 4-64
- * zx1: 0-1, 257-260, 4-256
- * Stretch (N-class): 0-2, 4-32, 34-xxx
- */
-
-/* Since each 1GB can only belong to one region (node), we can create
- * an index table for pfn to nid lookup; each entry in pfnnid_map
- * represents 1GB, and contains the node that the memory belongs to. */
-
-#define PFNNID_SHIFT (30 - PAGE_SHIFT)
-#define PFNNID_MAP_MAX	512	/* support 512GB */
-extern signed char pfnnid_map[PFNNID_MAP_MAX];
-
-#ifndef CONFIG_64BIT
-#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
-#else
-/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
-#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
-#endif
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
-	unsigned int i;
-
-	if (unlikely(pfn_is_io(pfn)))
-		return 0;
-
-	i = pfn >> PFNNID_SHIFT;
-	BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
-
-	return pfnnid_map[i];
-}
-
-static inline int pfn_valid(int pfn)
-{
-	int nid = pfn_to_nid(pfn);
-
-	if (nid >= 0)
-		return (pfn < node_end_pfn(nid));
-	return 0;
-}
-
-#endif
 #endif /* _PARISC_MMZONE_H */
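All of the DISCONTIGMEM machinery goes away here: the per-node node_data array, the 1 GB-granular pfnnid_map, pfn_to_nid() and the node-aware pfn_valid(). The memory-layout comment is not lost — it resurfaces in the new asm/sparsemem.h below.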

@@ -147,9 +147,9 @@ extern int npmem_ranges;
 #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))

-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
-#endif /* CONFIG_DISCONTIGMEM */
+#endif

 #ifdef CONFIG_HUGETLB_PAGE
 #define HPAGE_SHIFT		PMD_SHIFT /* fixed for transparent huge pages */

@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_KERNEL_PATCH_H
+#define _PARISC_KERNEL_PATCH_H
+
+/* stop machine and patch kernel text */
+void patch_text(void *addr, unsigned int insn);
+
+/* patch kernel text with machine already stopped (e.g. in kgdb) */
+void __patch_text(void *addr, unsigned int insn);
+
+#endif
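patch_text() is the stop_machine()-based variant for live patching (jump labels, kprobes), while __patch_text() serves contexts where the machine is already stopped, such as KGDB. A hedged sketch of how a KGDB breakpoint could be installed with these helpers — modelled on typical kgdb arch glue, not the series' actual code; probe_kernel_read() is the v5.2-era accessor:

#include <linux/kgdb.h>
#include <linux/uaccess.h>	/* probe_kernel_read() in v5.2 */
#include <asm/kgdb.h>
#include <asm/patch.h>

static int example_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int ret;

	/* Save the original instruction so it can be restored later. */
	ret = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (ret)
		return ret;

	/* The system is still running at this point, so take the
	 * stop_machine()-based path rather than __patch_text(). */
	patch_text((void *)bpt->bpt_addr, PARISC_KGDB_BREAK_INSN);
	return 0;
}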

@@ -41,6 +41,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
 #endif
 	}
+	spin_lock_init(pgd_spinlock(actual_pgd));
 	return actual_pgd;
 }

@@ -17,7 +17,7 @@
 #include <asm/processor.h>
 #include <asm/cache.h>

-extern spinlock_t pa_tlb_lock;
+static inline spinlock_t *pgd_spinlock(pgd_t *);

 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel

@@ -34,16 +34,46 @@ extern spinlock_t pa_tlb_lock;
  */
 #define kern_addr_valid(addr)	(1)

-/* Purge data and instruction TLB entries. Must be called holding
- * the pa_tlb_lock. The TLB purge instructions are slow on SMP
- * machines since the purge must be broadcast to all CPUs.
+/* This is for the serialization of PxTLB broadcasts. At least on the N class
+ * systems, only one PxTLB inter processor broadcast can be active at any one
+ * time on the Merced bus.
+
+ * PTE updates are protected by locks in the PMD.
  */
+extern spinlock_t pa_tlb_flush_lock;
+extern spinlock_t pa_swapper_pg_lock;
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+extern int pa_serialize_tlb_flushes;
+#else
+#define pa_serialize_tlb_flushes	(0)
+#endif
+
+#define purge_tlb_start(flags)	do { \
+	if (pa_serialize_tlb_flushes)	\
+		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
+	else \
+		local_irq_save(flags);	\
+	} while (0)
+#define purge_tlb_end(flags)	do { \
+	if (pa_serialize_tlb_flushes)	\
+		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
+	else \
+		local_irq_restore(flags); \
+	} while (0)
+
+/* Purge data and instruction TLB entries. The TLB purge instructions
+ * are slow on SMP machines since the purge must be broadcast to all CPUs.
+ */
 static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long flags;

 	purge_tlb_start(flags);
 	mtsp(mm->context, 1);
 	pdtlb(addr);
 	pitlb(addr);
 	purge_tlb_end(flags);
 }

@@ -59,11 +89,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 		do {						\
 			pte_t old_pte;				\
 			unsigned long flags;			\
-			spin_lock_irqsave(&pa_tlb_lock, flags);	\
+			spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
 			old_pte = *ptep;			\
 			set_pte(ptep, pteval);			\
 			purge_tlb_entries(mm, addr);		\
-			spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
+			spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
 		} while (0)

 #endif /* !__ASSEMBLY__ */

@@ -88,10 +118,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #if CONFIG_PGTABLE_LEVELS == 3
 #define PGD_ORDER	1 /* Number of pages per pgd */
 #define PMD_ORDER	1 /* Number of pages per pmd */
-#define PGD_ALLOC_ORDER	2 /* first pgd contains pmd */
+#define PGD_ALLOC_ORDER	(2 + 1) /* first pgd contains pmd */
 #else
 #define PGD_ORDER	1 /* Number of pages per pgd */
-#define PGD_ALLOC_ORDER	PGD_ORDER
+#define PGD_ALLOC_ORDER	(PGD_ORDER + 1)
 #endif

 /* Definitions for 3rd level (we use PLD here for Page Lower directory

@@ -459,6 +489,15 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })

+static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
+{
+	if (unlikely(pgd == swapper_pg_dir))
+		return &pa_swapper_pg_lock;
+	return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
+}
+
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	pte_t pte;

@@ -467,15 +506,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 	if (!pte_young(*ptep))
 		return 0;

-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
 	pte = *ptep;
 	if (!pte_young(pte)) {
-		spin_unlock_irqrestore(&pa_tlb_lock, flags);
+		spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
 		return 0;
 	}
 	set_pte(ptep, pte_mkold(pte));
 	purge_tlb_entries(vma->vm_mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
 	return 1;
 }

@@ -485,11 +524,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	pte_t old_pte;
 	unsigned long flags;

-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
 	old_pte = *ptep;
 	set_pte(ptep, __pte(0));
 	purge_tlb_entries(mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);

 	return old_pte;
 }

@@ -497,10 +536,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
 	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
 }

 #define pte_same(A,B)	(pte_val(A) == pte_val(B))
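The PGD_ALLOC_ORDER bump earlier in this file and pgd_spinlock() fit together: doubling the pgd allocation leaves the top half free for the per-pagetable lock. Worked through for CONFIG_PGTABLE_LEVELS == 3 (a layout note in units of pages, independent of page size):

/*
 * PGD_ORDER = 1            -> pgd proper:  2 pages
 * old PGD_ALLOC_ORDER = 2  -> pgd + pmd:   4 pages (first pgd holds pmd)
 * new PGD_ALLOC_ORDER = 3  -> allocation:  8 pages
 *
 * pgd_spinlock(pgd) = pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1))
 *                   = pgd + 4 pages
 *
 * i.e. the spinlock storage starts immediately after the original
 * pgd/pmd pages, in the extra half added by the new order.
 */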

@@ -37,4 +37,17 @@ extern int regs_query_register_offset(const char *name);
 extern const char *regs_query_register_name(unsigned int offset);
 #define MAX_REG_OFFSET (offsetof(struct pt_regs, ipsw))
 #define kernel_stack_pointer(regs) ((regs)->gr[30])
+
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+					      unsigned int offset)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return 0;
+	return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
+int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
+
 #endif
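regs_get_register() is the accessor KPROBE_EVENTS uses to fetch probe arguments by pt_regs offset. A hypothetical use — example_fetch_arg0 is illustrative, not from the series; %r26 carries the first argument in the PA-RISC calling convention:

#include <linux/stddef.h>	/* offsetof() */
#include <asm/ptrace.h>

static unsigned long example_fetch_arg0(struct pt_regs *regs)
{
	/* gr[26] == %r26 == arg0 on parisc */
	return regs_get_register(regs, offsetof(struct pt_regs, gr[26]));
}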

@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_PARISC_SPARSEMEM_H
+#define ASM_PARISC_SPARSEMEM_H
+
+/* We have these possible memory map layouts:
+ * Astro: 0-3.75, 67.75-68, 4-64
+ * zx1: 0-1, 257-260, 4-256
+ * Stretch (N-class): 0-2, 4-32, 34-xxx
+ */
+
+#define MAX_PHYSMEM_BITS	39	/* 512 GB */
+#define SECTION_SIZE_BITS	27	/* 128 MB */
+
+#endif
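The arithmetic: MAX_PHYSMEM_BITS = 39 caps the modelled physical address space at 2^39 bytes = 512 GB, and SECTION_SIZE_BITS = 27 makes each section 2^27 bytes = 128 MB, so SPARSEMEM needs at most 2^(39-27) = 4096 sections to cover the sparse Astro/zx1/N-class layouts listed above — considerably finer-grained than the 1 GB-granular pfnnid_map it replaces.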

@@ -37,7 +37,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 	volatile unsigned int *a;

 	a = __ldcw_align(x);
+#ifdef CONFIG_SMP
+	(void) __ldcw(a);
+#else
 	mb();
+#endif
 	*a = 1;
 }
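On PA-RISC, ldcw (load and clear word) is the architecture's atomic read-modify-write operation and is strongly ordered, so issuing a dummy ldcw on the already-held lock word forces earlier critical-section accesses to complete before the releasing store *a = 1. On SMP this replaces the sync-based mb() as the release barrier; UP builds, where no other CPU can observe the ordering, keep the plain mb().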

@@ -8,21 +8,6 @@
 #include <linux/sched.h>
 #include <asm/mmu_context.h>

-/* This is for the serialisation of PxTLB broadcasts. At least on the
- * N class systems, only one PxTLB inter processor broadcast can be
- * active at any one time on the Merced bus. This tlb purge
- * synchronisation is fairly lightweight and harmless so we activate
- * it on all systems not just the N class.
- * It is also used to ensure PTE updates are atomic and consistent
- * with the TLB.
- */
-extern spinlock_t pa_tlb_lock;
-
-#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
-#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)
-
 extern void flush_tlb_all(void);
 extern void flush_tlb_all_local(void *);

@@ -79,13 +64,6 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long addr)
 {
-	unsigned long flags, sid;
-
-	sid = vma->vm_mm->context;
-	purge_tlb_start(flags);
-	mtsp(sid, 1);
-	pdtlb(addr);
-	pitlb(addr);
-	purge_tlb_end(flags);
+	purge_tlb_entries(vma->vm_mm, addr);
 }
 #endif