
powerpc expects IRQs to already be (soft) disabled when switch_mm() is
called, as made clear in the commit message of commit 9c1e105238
("powerpc: Allow perf_counters to access user memory at interrupt time").

Aside from any race conditions that might exist between switch_mm() and
an IRQ, there is also an unconditional hard_irq_disable() in
switch_slb(). If that isn't followed at some point by an IRQ enable then
interrupts will remain disabled until we return to userspace.

It is true that when switch_mm() is called from the scheduler IRQs are
off, but not when it's called by use_mm(). Looking closer we see that
last year, in commit f98db6013c ("sched/core: Add switch_mm_irqs_off()
and use it in the scheduler"), this was made more explicit by the
addition of switch_mm_irqs_off(), which is now called by the scheduler,
vs switch_mm() which is used by use_mm().

Arguably it is a bug in use_mm() to call switch_mm() in a different
context than it expects, but fixing that will take time.

This was discovered recently when vhost started throwing warnings such
as:

  BUG: sleeping function called from invalid context at kernel/mutex.c:578
  in_atomic(): 0, irqs_disabled(): 1, pid: 10768, name: vhost-10760
  no locks held by vhost-10760/10768.
  irq event stamp: 10
  hardirqs last  enabled at (9):  _raw_spin_unlock_irq+0x40/0x80
  hardirqs last disabled at (10): switch_slb+0x2e4/0x490
  softirqs last  enabled at (0):  copy_process+0x5e8/0x1260
  softirqs last disabled at (0):  (null)
  Call Trace:
    show_stack+0x88/0x390 (unreliable)
    dump_stack+0x30/0x44
    __might_sleep+0x1c4/0x2d0
    mutex_lock_nested+0x74/0x5c0
    cgroup_attach_task_all+0x5c/0x180
    vhost_attach_cgroups_work+0x58/0x80 [vhost]
    vhost_worker+0x24c/0x3d0 [vhost]
    kthread+0xec/0x100
    ret_from_kernel_thread+0x5c/0xd4

Prior to commit 04b96e5528 ("vhost: lockless enqueuing") (Aug 2016) the
vhost_worker() would do a spin_unlock_irq() not long after calling
use_mm(), which had the effect of reenabling IRQs. Since that commit
removed the locking in vhost_worker(), the body of the vhost_worker()
loop now runs with interrupts off, causing the warnings.

This patch addresses the problem by making the powerpc code mirror the
x86 code, ie. we disable interrupts in switch_mm(), and optimise the
scheduler case by defining switch_mm_irqs_off().

Cc: stable@vger.kernel.org # v4.7+
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
[mpe: Flesh out/rewrite change log, add stable]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page); /* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
                unsigned long ua, unsigned long entries,
                struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
                struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
                unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
                unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
                unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
                                      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
                                      struct mm_struct *next,
                                      struct task_struct *tsk)
{
        if (radix_enabled())
                return radix__switch_mmu_context(prev, next);
        return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

/*
 * switch_mm_irqs_off() is the entry point called from the architecture
 * independent code in kernel/sched/core.c; switch_mm() below wraps it
 * in an irq save/restore for callers, such as use_mm(), that may run
 * with interrupts enabled.
 */
static inline void switch_mm_irqs_off(struct mm_struct *prev,
                                      struct mm_struct *next,
                                      struct task_struct *tsk)
{
        /* Mark this context as having been used on the new CPU */
        if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

        /* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
        tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

        /* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
        get_paca()->pgd = next->pgd;
#endif
        /* Nothing else to do if we aren't actually switching */
        if (prev == next)
                return;

#ifdef CONFIG_PPC_ICSWX
        /* Switch coprocessor context only if prev or next uses a coprocessor */
        if (prev->context.acop || next->context.acop)
                switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

        /*
         * We must stop all altivec streams before changing the HW
         * context
         */
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

        /*
         * The actual HW switching method differs between the various
         * sub-architectures. Out of line for now.
         */
        switch_mmu_context(prev, next, tsk);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}
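/*
 * The generic fallback added in commit f98db6013c defines
 * switch_mm_irqs_off() as switch_mm() unless the architecture provides
 * its own; this define advertises our optimised variant to that code.
 */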
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk,mm)   do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        unsigned long flags;

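        /*
         * switch_mm() now disables interrupts itself, so this outer
         * save/restore is redundant, though harmless:
         * local_irq_save()/local_irq_restore() nest safely.
         */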
        local_irq_save(flags);
        switch_mm(prev, next, current);
        local_irq_restore(flags);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
                                  struct task_struct *tsk)
{
        /* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
        get_paca()->pgd = NULL;
#endif
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
                              struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
                mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
                                     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                                             bool write, bool execute,
                                             bool foreign)
{
        /* by default, allow everything */
        return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
        /* by default, allow everything */
        return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */