Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:
 "MIPS updates:
   - All the things that didn't make 3.10.
   - Remove the Windriver PPMC platform. Nobody will miss it.
   - Remove a workaround from kernel/irq/irqdomain.c which was there exclusively for MIPS. Patch by Grant Likely.
   - More small improvements for the SEAD-3 platform.
   - Improvements to the BMIPS / SMP support for the BCM63xx series.
   - Various cleanups of dead leftovers.
   - Platform support for the Cavium Octeon-based EdgeRouter Lite.

  Two large KVM patchsets didn't make it for this pull request because their respective authors are vacationing."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (124 commits)
  MIPS: Kconfig: Add missing MODULES dependency to VPE_LOADER
  MIPS: BCM63xx: CLK: Add dummy clk_{set,round}_rate() functions
  MIPS: SEAD3: Disable L2 cache on SEAD-3.
  MIPS: BCM63xx: Enable second core SMP on BCM6328 if available
  MIPS: BCM63xx: Add SMP support to prom.c
  MIPS: define write{b,w,l,q}_relaxed
  MIPS: Expose missing pci_io{map,unmap} declarations
  MIPS: Malta: Update GCMP detection.
  Revert "MIPS: make CAC_ADDR and UNCAC_ADDR account for PHYS_OFFSET"
  MIPS: APSP: Remove <asm/kspd.h>
  SSB: Kconfig: Amend SSB_EMBEDDED dependencies
  MIPS: microMIPS: Fix improper definition of ISA exception bit.
  MIPS: Don't try to decode microMIPS branch instructions where they cannot exist.
  MIPS: Declare emulate_load_store_microMIPS as a static function.
  MIPS: Fix typos and cleanup comment
  MIPS: Cleanup indentation and whitespace
  MIPS: BMIPS: support booting from physical CPU other than 0
  MIPS: Only set cpu_has_mmips if SYS_SUPPORTS_MICROMIPS
  MIPS: GIC: Fix gic_set_affinity infinite loop
  MIPS: Don't save/restore OCTEON wide multiplier state on syscalls.
  ...
@@ -82,6 +82,9 @@ void output_task_defines(void)
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
OFFSET(TASK_PID, task_struct, pid);
#if defined(CONFIG_CC_STACKPROTECTOR)
OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
#endif
DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
BLANK();
}

@@ -467,5 +467,4 @@ unaligned:
printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;

}

@@ -6,6 +6,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -171,8 +172,12 @@ static volatile int daddi_ov __cpuinitdata;

asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;

prev_state = exception_enter();
daddi_ov = 1;
regs->cp0_epc += 4;
exception_exit(prev_state);
}

static inline void check_daddi(void)

@@ -146,8 +146,7 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
case MIPS_CPU_ISA_IV:
c->isa_level |= MIPS_CPU_ISA_IV;
case MIPS_CPU_ISA_III:
c->isa_level |= MIPS_CPU_ISA_I | MIPS_CPU_ISA_II |
MIPS_CPU_ISA_III;
c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
break;

case MIPS_CPU_ISA_M32R2:
@@ -156,8 +155,6 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
c->isa_level |= MIPS_CPU_ISA_M32R1;
case MIPS_CPU_ISA_II:
c->isa_level |= MIPS_CPU_ISA_II;
case MIPS_CPU_ISA_I:
c->isa_level |= MIPS_CPU_ISA_I;
break;
}
}
@@ -272,9 +269,6 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_ULRI;
if (config3 & MIPS_CONF3_ISA)
c->options |= MIPS_CPU_MICROMIPS;
#ifdef CONFIG_CPU_MICROMIPS
write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
#endif
if (config3 & MIPS_CONF3_VZ)
c->ases |= MIPS_ASE_VZ;

@@ -332,7 +326,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R2000:
c->cputype = CPU_R2000;
__cpu_name[cpu] = "R2000";
set_isa(c, MIPS_CPU_ISA_I);
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
@@ -352,7 +345,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_R3000;
__cpu_name[cpu] = "R3000";
}
set_isa(c, MIPS_CPU_ISA_I);
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
@@ -455,7 +447,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
break;
#endif
case PRID_IMP_TX39:
set_isa(c, MIPS_CPU_ISA_I);
c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;

if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
@@ -959,6 +950,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
set_isa(c, MIPS_CPU_ISA_M64R1);
c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
}
c->kscratch_mask = 0xf;
}

#ifdef CONFIG_64BIT

@@ -27,45 +27,6 @@

#include <kernel-entry-init.h>

/*
* inputs are the text nasid in t1, data nasid in t2.
*/
.macro MAPPED_KERNEL_SETUP_TLB
#ifdef CONFIG_MAPPED_KERNEL
/*
* This needs to read the nasid - assume 0 for now.
* Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0,
* 0+DVG in tlblo_1.
*/
dli t0, 0xffffffffc0000000
dmtc0 t0, CP0_ENTRYHI
li t0, 0x1c000 # Offset of text into node memory
dsll t1, NASID_SHFT # Shift text nasid into place
dsll t2, NASID_SHFT # Same for data nasid
or t1, t1, t0 # Physical load address of kernel text
or t2, t2, t0 # Physical load address of kernel data
dsrl t1, 12 # 4K pfn
dsrl t2, 12 # 4K pfn
dsll t1, 6 # Get pfn into place
dsll t2, 6 # Get pfn into place
li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _CACHE_CACHABLE_COW) >> 6)
or t0, t0, t1
mtc0 t0, CP0_ENTRYLO0 # physaddr, VG, cach exlwr
li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _PAGE_DIRTY|_CACHE_CACHABLE_COW) >> 6)
or t0, t0, t2
mtc0 t0, CP0_ENTRYLO1 # physaddr, DVG, cach exlwr
li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M
mtc0 t0, CP0_PAGEMASK
li t0, 0 # KMAP_INX
mtc0 t0, CP0_INDEX
li t0, 1
mtc0 t0, CP0_WIRED
tlbwi
#else
mtc0 zero, CP0_WIRED
#endif
.endm

/*
* For the moment disable interrupts, mark the kernel mode and
* set ST0_KX so that the CPU does not spit fire when using

@@ -219,16 +219,15 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,

/* Assumption : cpumask refers to a single CPU */
spin_lock_irqsave(&gic_lock, flags);
for (;;) {
/* Re-route this IRQ */
GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

/* Update the pcpu_masks */
for (i = 0; i < NR_CPUS; i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
/* Re-route this IRQ */
GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

/* Update the pcpu_masks */
for (i = 0; i < NR_CPUS; i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

}
cpumask_copy(d->affinity, cpumask);
spin_unlock_irqrestore(&gic_lock, flags);

@@ -168,14 +168,10 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
#endif

/* arg3: Get frame pointer of current stack */
#ifdef CONFIG_FRAME_POINTER
move a2, fp
#else /* ! CONFIG_FRAME_POINTER */
#ifdef CONFIG_64BIT
PTR_LA a2, PT_SIZE(sp)
#else
PTR_LA a2, (PT_SIZE+8)(sp)
#endif
#endif

jal prepare_ftrace_return

@@ -40,33 +40,6 @@
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)

/* check if we need to save COP2 registers */
PTR_L t2, TASK_THREAD_INFO(a0)
LONG_L t0, ST_OFF(t2)
bbit0 t0, 30, 1f

/* Disable COP2 in the stored process state */
li t1, ST0_CU2
xor t0, t1
LONG_S t0, ST_OFF(t2)

/* Enable COP2 so we can save it */
mfc0 t0, CP0_STATUS
or t0, t1
mtc0 t0, CP0_STATUS

/* Save COP2 */
daddu a0, THREAD_CP2
jal octeon_cop2_save
dsubu a0, THREAD_CP2

/* Disable COP2 now that we are done */
mfc0 t0, CP0_STATUS
li t1, ST0_CU2
xor t0, t1
mtc0 t0, CP0_STATUS

1:
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
/* Check if we need to store CVMSEG state */
mfc0 t0, $11,7 /* CvmMemCtl */
@@ -98,6 +71,13 @@
mtc0 t0, $11,7 /* CvmMemCtl */
#endif
3:

#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_L t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
#endif

/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.

@@ -66,9 +66,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "]\n");
}
if (cpu_has_mips_r) {
seq_printf(m, "isa\t\t\t:");
if (cpu_has_mips_1)
seq_printf(m, "%s", " mips1");
seq_printf(m, "isa\t\t\t: mips1");
if (cpu_has_mips_2)
seq_printf(m, "%s", " mips2");
if (cpu_has_mips_3)

@@ -201,9 +201,12 @@ int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
return 1;
}

/*
*
*/
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
void *func;
unsigned long func_size;

@@ -30,7 +30,7 @@ __init void mips_set_machine_name(const char *name)
if (name == NULL)
return;

strncpy(mips_machine_name, name, sizeof(mips_machine_name));
strlcpy(mips_machine_name, name, sizeof(mips_machine_name));
pr_info("MIPS: machine is %s\n", mips_get_machine_name());
}

@@ -15,6 +15,7 @@
* binaries.
*/
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -534,6 +535,8 @@ static inline int audit_arch(void)
*/
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
user_exit();

/* do the secure computing check first */
secure_computing_strict(regs->regs[2]);

@@ -570,6 +573,13 @@ out:
*/
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
/*
* We may come here right after calling schedule_user()
* or do_notify_resume(), in which case we can be in RCU
* user mode.
*/
user_exit();

audit_syscall_exit(regs);

if (!(current->ptrace & PT_PTRACED))
@@ -592,4 +602,6 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}

user_enter();
}

@@ -65,6 +65,13 @@ LEAF(resume)
fpu_save_single a0, t0 # clobbers t0

1:

#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_L t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
#endif

/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.

@@ -68,6 +68,12 @@
# clobbers t1
1:

#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_L t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
#endif

/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.

@@ -437,7 +437,6 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
int minor = iminor(file_inode(file));
struct rtlx_channel *rt = &rtlx->channel[minor];

/* any space left... */
if (!rtlx_write_poll(minor)) {

@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)

stack_done:
lw t0, TI_FLAGS($28) # syscall tracing enabled?
li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1
bnez t0, syscall_trace_entry # -> yes

@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)

sd a3, PT_R26(sp) # save a3 for syscall restarting

li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, syscall_trace_entry

@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)

sd a3, PT_R26(sp) # save a3 for syscall restarting

li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, n32_syscall_trace_entry

@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
PTR 4b, bad_stack
.previous

li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, trace_a_syscall

@@ -8,6 +8,7 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -573,6 +574,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
{
local_irq_enable();

user_exit();

/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
@@ -581,6 +584,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}

user_enter();
}

#ifdef CONFIG_SMP

@@ -63,7 +63,7 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id);

static void __init bmips_smp_setup(void)
{
int i;
int i, cpu = 1, boot_cpu = 0;

#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
/* arbitration priority */
@@ -72,13 +72,22 @@ static void __init bmips_smp_setup(void)
/* NBK and weak order flags */
set_c0_brcm_config_0(0x30000);

/* Find out if we are running on TP0 or TP1 */
boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));

/*
* MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
* MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
* MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
*
* If booting from TP1, leave the existing CMT interrupt routing
* such that TP0 responds to SW1 and TP1 responds to SW0.
*/
change_c0_brcm_cmt_intr(0xf8018000,
(0x02 << 27) | (0x03 << 15));
if (boot_cpu == 0)
change_c0_brcm_cmt_intr(0xf8018000,
(0x02 << 27) | (0x03 << 15));
else
change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27));

/* single core, 2 threads (2 pipelines) */
max_cpus = 2;
@@ -106,9 +115,15 @@ static void __init bmips_smp_setup(void)
if (!board_ebase_setup)
board_ebase_setup = &bmips_ebase_setup;

__cpu_number_map[boot_cpu] = 0;
__cpu_logical_map[0] = boot_cpu;

for (i = 0; i < max_cpus; i++) {
__cpu_number_map[i] = 1;
__cpu_logical_map[i] = 1;
if (i != boot_cpu) {
__cpu_number_map[i] = cpu;
__cpu_logical_map[cpu] = i;
cpu++;
}
set_cpu_possible(i, 1);
set_cpu_present(i, 1);
}
@@ -157,7 +172,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
bmips_send_ipi_single(cpu, 0);
else {
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
set_c0_brcm_cmt_ctrl(0x01);
/* Reset slave TP1 if booting from TP0 */
if (cpu_logical_map(cpu) == 0)
set_c0_brcm_cmt_ctrl(0x01);
#elif defined(CONFIG_CPU_BMIPS5000)
if (cpu & 0x01)
write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));

@@ -13,6 +13,7 @@
*/
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -264,7 +265,7 @@ static void __show_regs(const struct pt_regs *regs)

printk("Status: %08x ", (uint32_t) regs->cp0_status);

if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
if (cpu_has_3kex) {
if (regs->cp0_status & ST0_KUO)
printk("KUo ");
if (regs->cp0_status & ST0_IEO)
@@ -277,7 +278,7 @@ static void __show_regs(const struct pt_regs *regs)
printk("KUc ");
if (regs->cp0_status & ST0_IEC)
printk("IEc ");
} else {
} else if (cpu_has_4kex) {
if (regs->cp0_status & ST0_KX)
printk("KX ");
if (regs->cp0_status & ST0_SX)
@@ -423,7 +424,9 @@ asmlinkage void do_be(struct pt_regs *regs)
const struct exception_table_entry *fixup = NULL;
int data = regs->cp0_cause & 4;
int action = MIPS_BE_FATAL;
enum ctx_state prev_state;

prev_state = exception_enter();
/* XXX For now. Fixme, this searches the wrong table ... */
if (data && !user_mode(regs))
fixup = search_dbe_tables(exception_epc(regs));
@@ -436,11 +439,11 @@ asmlinkage void do_be(struct pt_regs *regs)

switch (action) {
case MIPS_BE_DISCARD:
return;
goto out;
case MIPS_BE_FIXUP:
if (fixup) {
regs->cp0_epc = fixup->nextinsn;
return;
goto out;
}
break;
default:
@@ -455,10 +458,13 @@ asmlinkage void do_be(struct pt_regs *regs)
field, regs->cp0_epc, field, regs->regs[31]);
if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
== NOTIFY_STOP)
return;
goto out;

die_if_kernel("Oops", regs);
force_sig(SIGBUS, current);

out:
exception_exit(prev_state);
}

/*
@@ -673,8 +679,10 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)

asmlinkage void do_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
siginfo_t info;

prev_state = exception_enter();
die_if_kernel("Integer overflow", regs);

info.si_code = FPE_INTOVF;
@@ -682,6 +690,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
exception_exit(prev_state);
}

int process_fpemu_return(int sig, void __user *fault_addr)
@@ -713,11 +722,13 @@ int process_fpemu_return(int sig, void __user *fault_addr)
*/
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
enum ctx_state prev_state;
siginfo_t info = {0};

prev_state = exception_enter();
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
== NOTIFY_STOP)
return;
goto out;
die_if_kernel("FP exception in kernel code", regs);

if (fcr31 & FPU_CSR_UNI_X) {
@@ -753,7 +764,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);

return;
goto out;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
else if (fcr31 & FPU_CSR_DIV_X)
@@ -770,6 +781,9 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);

out:
exception_exit(prev_state);
}

static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
@@ -835,9 +849,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
asmlinkage void do_bp(struct pt_regs *regs)
{
unsigned int opcode, bcode;
enum ctx_state prev_state;
unsigned long epc;
u16 instr[2];

prev_state = exception_enter();
if (get_isa16_mode(regs->cp0_epc)) {
/* Calculate EPC. */
epc = exception_epc(regs);
@@ -852,7 +868,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
goto out_sigsegv;
bcode = (instr[0] >> 6) & 0x3f;
do_trap_or_bp(regs, bcode, "Break");
return;
goto out;
}
} else {
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
@@ -876,12 +892,12 @@ asmlinkage void do_bp(struct pt_regs *regs)
switch (bcode) {
case BRK_KPROBE_BP:
if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
goto out;
else
break;
case BRK_KPROBE_SSTEPBP:
if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
goto out;
else
break;
default:
@@ -889,18 +905,24 @@ asmlinkage void do_bp(struct pt_regs *regs)
}

do_trap_or_bp(regs, bcode, "Break");

out:
exception_exit(prev_state);
return;

out_sigsegv:
force_sig(SIGSEGV, current);
goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
u32 opcode, tcode = 0;
enum ctx_state prev_state;
u16 instr[2];
unsigned long epc = msk_isa16_mode(exception_epc(regs));

prev_state = exception_enter();
if (get_isa16_mode(regs->cp0_epc)) {
if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
__get_user(instr[1], (u16 __user *)(epc + 2)))
@@ -918,10 +940,14 @@ asmlinkage void do_tr(struct pt_regs *regs)
}

do_trap_or_bp(regs, tcode, "Trap");

out:
exception_exit(prev_state);
return;

out_sigsegv:
force_sig(SIGSEGV, current);
goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
@@ -929,17 +955,19 @@ asmlinkage void do_ri(struct pt_regs *regs)
unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
unsigned long old_epc = regs->cp0_epc;
unsigned long old31 = regs->regs[31];
enum ctx_state prev_state;
unsigned int opcode = 0;
int status = -1;

prev_state = exception_enter();
if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
== NOTIFY_STOP)
return;
goto out;

die_if_kernel("Reserved instruction in kernel code", regs);

if (unlikely(compute_return_epc(regs) < 0))
return;
goto out;

if (get_isa16_mode(regs->cp0_epc)) {
unsigned short mmop[2] = { 0 };
@@ -974,6 +1002,9 @@ asmlinkage void do_ri(struct pt_regs *regs)
regs->regs[31] = old31;
force_sig(status, current);
}

out:
exception_exit(prev_state);
}

/*
@@ -1025,21 +1056,16 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
{
struct pt_regs *regs = data;

switch (action) {
default:
die_if_kernel("Unhandled kernel unaligned access or invalid "
die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
"instruction", regs);
/* Fall through */

case CU2_EXCEPTION:
force_sig(SIGILL, current);
}
force_sig(SIGILL, current);

return NOTIFY_OK;
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
enum ctx_state prev_state;
unsigned int __user *epc;
unsigned long old_epc, old31;
unsigned int opcode;
@@ -1047,10 +1073,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
int status;
unsigned long __maybe_unused flags;

die_if_kernel("do_cpu invoked from kernel context!", regs);

prev_state = exception_enter();
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

if (cpid != 2)
die_if_kernel("do_cpu invoked from kernel context!", regs);

switch (cpid) {
case 0:
epc = (unsigned int __user *)exception_epc(regs);
@@ -1060,7 +1088,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
status = -1;

if (unlikely(compute_return_epc(regs) < 0))
return;
goto out;

if (get_isa16_mode(regs->cp0_epc)) {
unsigned short mmop[2] = { 0 };
@@ -1093,7 +1121,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
force_sig(status, current);
}

return;
goto out;

case 3:
/*
@@ -1131,19 +1159,26 @@ asmlinkage void do_cpu(struct pt_regs *regs)
mt_ase_fp_affinity();
}

return;
goto out;

case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
return;
goto out;
}

force_sig(SIGILL, current);

out:
exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
enum ctx_state prev_state;

prev_state = exception_enter();
force_sig(SIGILL, current);
exception_exit(prev_state);
}

/*
@@ -1151,8 +1186,10 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
*/
asmlinkage void do_watch(struct pt_regs *regs)
{
enum ctx_state prev_state;
u32 cause;

prev_state = exception_enter();
/*
* Clear WP (bit 22) bit of cause register so we don't loop
* forever.
@@ -1174,13 +1211,16 @@ asmlinkage void do_watch(struct pt_regs *regs)
mips_clear_watch_registers();
local_irq_enable();
}
exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;

prev_state = exception_enter();
show_regs(regs);

if (multi_match) {
@@ -1202,6 +1242,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
(multi_match) ? "" : "not ");
exception_exit(prev_state);
}

asmlinkage void do_mt(struct pt_regs *regs)
@@ -1627,7 +1668,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
}

extern void tlb_init(void);
extern void flush_tlb_handlers(void);

/*
* Timer interrupt
@@ -1837,6 +1877,15 @@ void __init trap_init(void)
ebase += (read_c0_ebase() & 0x3ffff000);
}

if (cpu_has_mmips) {
unsigned int config3 = read_c0_config3();

if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
else
write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
}

if (board_ebase_setup)
board_ebase_setup();
per_cpu_trap_init(true);
@@ -1956,7 +2005,6 @@ void __init trap_init(void)
set_handler(0x080, &except_vec3_generic, 0x80);

local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers();

sort_extable(__start___dbe_table, __stop___dbe_table);

@@ -72,6 +72,7 @@
* A store crossing a page boundary might be executed only partially.
* Undo the partial store in this case.
*/
#include <linux/context_tracking.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/smp.h>
@@ -684,7 +685,8 @@ const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };

void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
static void emulate_load_store_microMIPS(struct pt_regs *regs,
void __user *addr)
{
unsigned long value;
unsigned int res;
@@ -1548,11 +1550,14 @@ sigill:
("Unhandled kernel unaligned access or invalid instruction", regs);
force_sig(SIGILL, current);
}

asmlinkage void do_ade(struct pt_regs *regs)
{
enum ctx_state prev_state;
unsigned int __user *pc;
mm_segment_t seg;

prev_state = exception_enter();
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, regs, regs->cp0_badvaddr);
/*
@@ -1628,6 +1633,7 @@ sigbus:
/*
* XXX On return from the signal handler we should advance the epc
*/
exception_exit(prev_state);
}

#ifdef CONFIG_DEBUG_FS

@@ -111,6 +111,7 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
* disable the register.
*/
write_c0_watchlo0(7);
back_to_back_c0_hazard();
t = read_c0_watchlo0();
write_c0_watchlo0(0);
c->watch_reg_masks[0] = t & 7;
@@ -121,12 +122,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
c->watch_reg_use_cnt = 1;
t = read_c0_watchhi0();
write_c0_watchhi0(t | 0xff8);
back_to_back_c0_hazard();
t = read_c0_watchhi0();
c->watch_reg_masks[0] |= (t & 0xff8);
if ((t & 0x80000000) == 0)
return;

write_c0_watchlo1(7);
back_to_back_c0_hazard();
t = read_c0_watchlo1();
write_c0_watchlo1(0);
c->watch_reg_masks[1] = t & 7;
@@ -135,12 +138,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
c->watch_reg_use_cnt = 2;
t = read_c0_watchhi1();
write_c0_watchhi1(t | 0xff8);
back_to_back_c0_hazard();
t = read_c0_watchhi1();
c->watch_reg_masks[1] |= (t & 0xff8);
if ((t & 0x80000000) == 0)
return;

write_c0_watchlo2(7);
back_to_back_c0_hazard();
t = read_c0_watchlo2();
write_c0_watchlo2(0);
c->watch_reg_masks[2] = t & 7;
@@ -149,12 +154,14 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
c->watch_reg_use_cnt = 3;
t = read_c0_watchhi2();
write_c0_watchhi2(t | 0xff8);
back_to_back_c0_hazard();
t = read_c0_watchhi2();
c->watch_reg_masks[2] |= (t & 0xff8);
if ((t & 0x80000000) == 0)
return;

write_c0_watchlo3(7);
back_to_back_c0_hazard();
t = read_c0_watchlo3();
write_c0_watchlo3(0);
c->watch_reg_masks[3] = t & 7;
@@ -163,6 +170,7 @@ __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
c->watch_reg_use_cnt = 4;
t = read_c0_watchhi3();
write_c0_watchhi3(t | 0xff8);
back_to_back_c0_hazard();
t = read_c0_watchhi3();
c->watch_reg_masks[3] |= (t & 0xff8);
if ((t & 0x80000000) == 0)