ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on ASID-capable CPUs
Since the ASIDs must be unique to an mm across all the CPUs in a system, the __new_context() function needs to broadcast a context reset event to all the CPUs during ASID allocation if a roll-over occurred. Such IPIs cannot be issued with interrupts disabled, so ARM had to define __ARCH_WANT_INTERRUPTS_ON_CTXSW.

This patch turns the check_context() function into check_and_switch_context(), called from switch_mm(). On ASID-capable CPUs (ARMv6 onwards), if a new ASID is needed while interrupts are disabled, it defers the __new_context() and cpu_switch_mm() calls to the post-lock switch hook, where interrupts are enabled. Setting the reserved TTBR0 was also moved to check_and_switch_context() from cpu_v7_switch_mm().

Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Tested-by: Marc Zyngier <Marc.Zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
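The deferral works by flagging the task during the context switch and completing the mm switch once interrupts are enabled again. Below is a minimal sketch of that flow under the assumptions stated in the comments; the helper name switch_new_context() and the TIF_SWITCH_MM thread flag are illustrative and may not match the patch exactly:

/*
 * Minimal sketch of the deferred ASID switch described above.
 * switch_new_context() and TIF_SWITCH_MM are illustrative names,
 * not necessarily the identifiers used by the actual patch.
 */
static inline void switch_new_context(struct mm_struct *mm)
{
	unsigned long flags;

	__new_context(mm);	/* may broadcast a reset IPI on roll-over */

	local_irq_save(flags);
	cpu_switch_mm(mm->pgd, mm);
	local_irq_restore(flags);
}

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	/*
	 * Point TTBR0 at a reserved pgd so speculative page table walks
	 * cannot run with a stale ASID while it is being updated
	 * (moved here from cpu_v7_switch_mm()).
	 */
	cpu_set_reserved_ttbr0();

	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
		/* ASID is from the current generation: switch directly. */
		cpu_switch_mm(mm->pgd, mm);
	else if (irqs_disabled())
		/*
		 * Called from context_switch() with interrupts disabled;
		 * __new_context() cannot send IPIs here, so defer the
		 * allocation to the post-lock switch hook.
		 */
		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
	else
		/* Direct switch_mm() call with interrupts enabled. */
		switch_new_context(mm);
}

/* Runs after the runqueue lock is dropped and interrupts are re-enabled. */
static inline void finish_arch_post_lock_switch(void)
{
	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
		switch_new_context(current->mm);
}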
@@ -23,7 +23,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #endif
 
 #ifdef CONFIG_ARM_LPAE
-static void cpu_set_reserved_ttbr0(void)
+void cpu_set_reserved_ttbr0(void)
 {
 	unsigned long ttbl = __pa(swapper_pg_dir);
 	unsigned long ttbh = 0;
@@ -39,7 +39,7 @@ static void cpu_set_reserved_ttbr0(void)
 	isb();
 }
 #else
-static void cpu_set_reserved_ttbr0(void)
+void cpu_set_reserved_ttbr0(void)
 {
 	u32 ttb;
 	/* Copy TTBR1 into TTBR0 */
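The hunks above drop the static qualifier from both variants of cpu_set_reserved_ttbr0() (LPAE and classic MMU), presumably so the function can be called from check_and_switch_context() outside arch/arm/mm/context.c. That would require a declaration in a shared header; the exact location is an assumption, but it would look like:

void cpu_set_reserved_ttbr0(void);	/* defined in arch/arm/mm/context.c */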