Merge tag 'powerpc-5.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman: "One fix for a boot hang on some Freescale machines when PREEMPT is enabled. Two CVE fixes for bugs in our handling of FP registers and transactional memory, both of which can result in corrupted FP state, or FP state leaking between processes. Thanks to: Chris Packham, Christophe Leroy, Gustavo Romero, Michael Neuling" * tag 'powerpc-5.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: powerpc/tm: Fix restoring FP/VMX facility incorrectly on interrupts powerpc/tm: Fix FP/VMX unavailable exceptions inside a transaction powerpc/64e: Drop stale call to smp_processor_id() which hangs SMP startup
This commit is contained in:
@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
|
||||
}
|
||||
}
|
||||
|
||||
static bool tm_active_with_fp(struct task_struct *tsk)
|
||||
{
|
||||
return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
|
||||
(tsk->thread.ckpt_regs.msr & MSR_FP);
|
||||
}
|
||||
|
||||
static bool tm_active_with_altivec(struct task_struct *tsk)
|
||||
{
|
||||
return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
|
||||
(tsk->thread.ckpt_regs.msr & MSR_VEC);
|
||||
}
|
||||
#else
|
||||
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
|
||||
static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
|
||||
static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
|
||||
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
|
||||
|
||||
bool strict_msr_control;
|
||||
@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
|
||||
|
||||
static int restore_fp(struct task_struct *tsk)
|
||||
{
|
||||
if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
|
||||
if (tsk->thread.load_fp) {
|
||||
load_fp_state(&current->thread.fp_state);
|
||||
current->thread.load_fp++;
|
||||
return 1;
|
||||
@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
|
||||
|
||||
static int restore_altivec(struct task_struct *tsk)
|
||||
{
|
||||
if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
|
||||
(tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
|
||||
if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
|
||||
load_vr_state(&tsk->thread.vr_state);
|
||||
tsk->thread.used_vr = 1;
|
||||
tsk->thread.load_vec++;
|
||||
@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
|
||||
if (!tsk->thread.regs)
|
||||
return;
|
||||
|
||||
check_if_tm_restore_required(tsk);
|
||||
|
||||
usermsr = tsk->thread.regs->msr;
|
||||
|
||||
if ((usermsr & msr_all_available) == 0)
|
||||
return;
|
||||
|
||||
msr_check_and_set(msr_all_available);
|
||||
check_if_tm_restore_required(tsk);
|
||||
|
||||
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
|
||||
|
||||
|
@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
|
||||
#ifdef CONFIG_PPC_FSL_BOOK3E
|
||||
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
|
||||
unsigned int num_cams;
|
||||
int __maybe_unused cpu = smp_processor_id();
|
||||
bool map = true;
|
||||
|
||||
/* use a quarter of the TLBCAM for bolted linear map */
|
||||
|
Reference in New Issue
Block a user