Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:
 "This is the main pull request for MIPS for 4.5 plus some 4.4 fixes. The
  executive summary:

   - ATH79 platform improvements, use DT bindings for the ATH79 USB PHY.
   - Avoid useless rebuilds for zboot.
   - jz4780: Add NEMC, BCH and NAND device tree nodes.
   - Initial support for the MicroChip DT platform. As all the device
     drivers are missing this is still of limited use.
   - Some Loongson3 cleanups.
   - The unavoidable whitespace polishing.
   - Reduce clock skew when synchronizing the CPU cycle counters on CPU
     startup.
   - Add MIPS R6 fixes.
   - Lots of cleanups across arch/mips as fallout from KVM.
   - Lots of minor fixes and changes for IEEE 754-2008 support to the FPU
     emulator / fp-assist software.
   - Minor Ralink, BCM47xx and bcm963xx platform support improvements.
   - Support SMP on BCM63168"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (84 commits)
  MIPS: zboot: Add support for serial debug using the PROM
  MIPS: zboot: Avoid useless rebuilds
  MIPS: BMIPS: Enable ARCH_WANT_OPTIONAL_GPIOLIB
  MIPS: bcm63xx: nvram: Remove unused bcm63xx_nvram_get_psi_size() function
  MIPS: bcm963xx: Update bcm_tag field image_sequence
  MIPS: bcm963xx: Move extended flash address to bcm_tag header file
  MIPS: bcm963xx: Move Broadcom BCM963xx image tag data structure
  MIPS: bcm63xx: nvram: Use nvram structure definition from header file
  MIPS: bcm963xx: Add Broadcom BCM963xx board nvram data structure
  MAINTAINERS: Add KVM for MIPS entry
  MIPS: KVM: Add missing newline to kvm_err()
  MIPS: Move KVM specific opcodes into asm/inst.h
  MIPS: KVM: Use cacheops.h definitions
  MIPS: Break down cacheops.h definitions
  MIPS: Use EXCCODE_ constants with set_except_vector()
  MIPS: Update trap codes
  MIPS: Move Cause.ExcCode trap codes to mipsregs.h
  MIPS: KVM: Make kvm_mips_{init,exit}() static
  MIPS: KVM: Refactor added offsetof()s
  MIPS: KVM: Convert EXPORT_SYMBOL to _GPL
  ...
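The IEEE 754-2008 items above come down to one incompatibility: legacy MIPS FPUs and 2008-mode FPUs use opposite conventions for the quiet bit of a NaN, so a binary built for one encoding misreads NaNs produced under the other. A minimal userspace sketch of the two single-precision quiet-NaN patterns (illustration only, not part of this series; the constants are the conventional default qNaN values):

    /* Illustration: the "same" quiet NaN under the two encodings. */
    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_MSB 0x00400000u  /* most significant fraction bit, single precision */

    static const uint32_t qnan_2008   = 0x7FC00000u; /* IEEE 754-2008: quiet bit set */
    static const uint32_t qnan_legacy = 0x7FBFFFFFu; /* legacy MIPS: quiet bit clear */

    int main(void)
    {
            printf("2008 qNaN   %#010x, fraction MSB %s\n", qnan_2008,
                   (qnan_2008 & FRAC_MSB) ? "set" : "clear");
            printf("legacy qNaN %#010x, fraction MSB %s\n", qnan_legacy,
                   (qnan_legacy & FRAC_MSB) ? "set" : "clear");
            return 0;
    }

The ELF loader changes further down use the EF_MIPS_NAN2008 header flag to decide which personality a binary gets, and the "ieee754=" parameter added in cpu-probe.c decides which personalities are accepted at all.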
@@ -190,7 +190,7 @@ static inline void check_daddi(void)
 	printk("Checking for the daddi bug... ");
 
 	local_irq_save(flags);
-	handler = set_except_vector(12, handle_daddi_ov);
+	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
 	/*
 	 * The following code fails to trigger an overflow exception
 	 * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
@@ -214,7 +214,7 @@ static inline void check_daddi(void)
 		".set pop"
 		: "=r" (v), "=&r" (tmp)
 		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
-	set_except_vector(12, handler);
+	set_except_vector(EXCCODE_OV, handler);
 	local_irq_restore(flags);
 
 	if (daddi_ov) {
@@ -225,14 +225,14 @@ static inline void check_daddi(void)
 	printk("yes, workaround... ");
 
 	local_irq_save(flags);
-	handler = set_except_vector(12, handle_daddi_ov);
+	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
 	asm volatile(
 		"addiu %1, $0, %2\n\t"
 		"dsrl %1, %1, 1\n\t"
 		"daddi %0, %1, %3"
 		: "=r" (v), "=&r" (tmp)
 		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
-	set_except_vector(12, handler);
+	set_except_vector(EXCCODE_OV, handler);
 	local_irq_restore(flags);
 
 	if (daddi_ov) {
@@ -98,6 +98,161 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
 	c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask;
 }
 
+/*
+ * Determine the IEEE 754 NaN encodings and ABS.fmt/NEG.fmt execution modes
+ * supported by FPU hardware.
+ */
+static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
+{
+	if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+			    MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+			    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+		unsigned long sr, fir, fcsr, fcsr0, fcsr1;
+
+		sr = read_c0_status();
+		__enable_fpu(FPU_AS_IS);
+
+		fir = read_32bit_cp1_register(CP1_REVISION);
+		if (fir & MIPS_FPIR_HAS2008) {
+			fcsr = read_32bit_cp1_register(CP1_STATUS);
+
+			fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+			write_32bit_cp1_register(CP1_STATUS, fcsr0);
+			fcsr0 = read_32bit_cp1_register(CP1_STATUS);
+
+			fcsr1 = fcsr | FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+			write_32bit_cp1_register(CP1_STATUS, fcsr1);
+			fcsr1 = read_32bit_cp1_register(CP1_STATUS);
+
+			write_32bit_cp1_register(CP1_STATUS, fcsr);
+
+			if (!(fcsr0 & FPU_CSR_NAN2008))
+				c->options |= MIPS_CPU_NAN_LEGACY;
+			if (fcsr1 & FPU_CSR_NAN2008)
+				c->options |= MIPS_CPU_NAN_2008;
+
+			if ((fcsr0 ^ fcsr1) & FPU_CSR_ABS2008)
+				c->fpu_msk31 &= ~FPU_CSR_ABS2008;
+			else
+				c->fpu_csr31 |= fcsr & FPU_CSR_ABS2008;
+
+			if ((fcsr0 ^ fcsr1) & FPU_CSR_NAN2008)
+				c->fpu_msk31 &= ~FPU_CSR_NAN2008;
+			else
+				c->fpu_csr31 |= fcsr & FPU_CSR_NAN2008;
+		} else {
+			c->options |= MIPS_CPU_NAN_LEGACY;
+		}
+
+		write_c0_status(sr);
+	} else {
+		c->options |= MIPS_CPU_NAN_LEGACY;
+	}
+}
+
+/*
+ * IEEE 754 conformance mode to use. Affects the NaN encoding and the
+ * ABS.fmt/NEG.fmt execution mode.
+ */
+static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT;
+
+/*
+ * Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes
+ * to support by the FPU emulator according to the IEEE 754 conformance
+ * mode selected. Note that "relaxed" straps the emulator so that it
+ * allows 2008-NaN binaries even for legacy processors.
+ */
+static void cpu_set_nofpu_2008(struct cpuinfo_mips *c)
+{
+	c->options &= ~(MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY);
+	c->fpu_csr31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+	c->fpu_msk31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+
+	switch (ieee754) {
+	case STRICT:
+		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+				    MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+			c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY;
+		} else {
+			c->options |= MIPS_CPU_NAN_LEGACY;
+			c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+		}
+		break;
+	case LEGACY:
+		c->options |= MIPS_CPU_NAN_LEGACY;
+		c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+		break;
+	case STD2008:
+		c->options |= MIPS_CPU_NAN_2008;
+		c->fpu_csr31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+		c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+		break;
+	case RELAXED:
+		c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY;
+		break;
+	}
+}
+
+/*
+ * Override the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode
+ * according to the "ieee754=" parameter.
+ */
+static void cpu_set_nan_2008(struct cpuinfo_mips *c)
+{
+	switch (ieee754) {
+	case STRICT:
+		mips_use_nan_legacy = !!cpu_has_nan_legacy;
+		mips_use_nan_2008 = !!cpu_has_nan_2008;
+		break;
+	case LEGACY:
+		mips_use_nan_legacy = !!cpu_has_nan_legacy;
+		mips_use_nan_2008 = !cpu_has_nan_legacy;
+		break;
+	case STD2008:
+		mips_use_nan_legacy = !cpu_has_nan_2008;
+		mips_use_nan_2008 = !!cpu_has_nan_2008;
+		break;
+	case RELAXED:
+		mips_use_nan_legacy = true;
+		mips_use_nan_2008 = true;
+		break;
+	}
+}
+
+/*
+ * IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode override
+ * settings:
+ *
+ * strict: accept binaries that request a NaN encoding supported by the FPU
+ * legacy: only accept legacy-NaN binaries
+ * 2008: only accept 2008-NaN binaries
+ * relaxed: accept any binaries regardless of whether supported by the FPU
+ */
+static int __init ieee754_setup(char *s)
+{
+	if (!s)
+		return -1;
+	else if (!strcmp(s, "strict"))
+		ieee754 = STRICT;
+	else if (!strcmp(s, "legacy"))
+		ieee754 = LEGACY;
+	else if (!strcmp(s, "2008"))
+		ieee754 = STD2008;
+	else if (!strcmp(s, "relaxed"))
+		ieee754 = RELAXED;
+	else
+		return -1;
+
+	if (!(boot_cpu_data.options & MIPS_CPU_FPU))
+		cpu_set_nofpu_2008(&boot_cpu_data);
+	cpu_set_nan_2008(&boot_cpu_data);
+
+	return 0;
+}
+
+early_param("ieee754", ieee754_setup);
+
 /*
  * Set the FIR feature flags for the FPU emulator.
  */
@@ -113,6 +268,8 @@ static void cpu_set_nofpu_id(struct cpuinfo_mips *c)
 	if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
 			    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6))
 		value |= MIPS_FPIR_F64 | MIPS_FPIR_L | MIPS_FPIR_W;
+	if (c->options & MIPS_CPU_NAN_2008)
+		value |= MIPS_FPIR_HAS2008;
 	c->fpu_id = value;
 }
 
@@ -137,6 +294,8 @@ static void cpu_set_fpu_opts(struct cpuinfo_mips *c)
 	}
 
 	cpu_set_fpu_fcsr_mask(c);
+	cpu_set_fpu_2008(c);
+	cpu_set_nan_2008(c);
 }
 
 /*
@@ -147,6 +306,8 @@ static void cpu_set_nofpu_opts(struct cpuinfo_mips *c)
 	c->options &= ~MIPS_CPU_FPU;
 	c->fpu_msk31 = mips_nofpu_msk31;
 
+	cpu_set_nofpu_2008(c);
+	cpu_set_nan_2008(c);
 	cpu_set_nofpu_id(c);
 }
 
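cpu_set_fpu_2008() above discovers what the FPU supports by writing the ABS2008/NAN2008 FCSR bits both ways and reading them back: a bit that follows the writes is software-controlled (so it is cleared from fpu_msk31), while a bit that reads back unchanged is hard-wired (so its fixed value is recorded in fpu_csr31). A generic sketch of that probe idiom, with hypothetical reg_read()/reg_write() callbacks standing in for the CP1 register accessors:

    /* Sketch of the write-both-ways probe; reg_read()/reg_write() are
     * hypothetical stand-ins for read/write_32bit_cp1_register(). */
    #include <stdbool.h>
    #include <stdint.h>

    struct bit_probe {
            bool writable;   /* software can flip the bit */
            bool fixed_set;  /* bit is hard-wired to 1 */
    };

    static struct bit_probe probe_bit(uint32_t (*reg_read)(void),
                                      void (*reg_write)(uint32_t),
                                      uint32_t bit)
    {
            uint32_t orig = reg_read();
            uint32_t lo, hi;
            struct bit_probe res;

            reg_write(orig & ~bit);   /* try to clear the bit */
            lo = reg_read();
            reg_write(orig | bit);    /* try to set the bit */
            hi = reg_read();
            reg_write(orig);          /* restore the original value */

            res.writable  = ((lo ^ hi) & bit) != 0;
            res.fixed_set = !res.writable && (lo & bit);
            return res;
    }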
@@ -11,6 +11,12 @@
 #include <linux/elf.h>
 #include <linux/sched.h>
 
+#include <asm/cpu-info.h>
+
+/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
+bool mips_use_nan_legacy;
+bool mips_use_nan_2008;
+
 /* FPU modes */
 enum {
 	FP_FRE,
@@ -68,15 +74,23 @@ static struct mode_req none_req = { true, true, false, true, true };
 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
 		     bool is_interp, struct arch_elf_state *state)
 {
-	struct elf32_hdr *ehdr32 = _ehdr;
+	union {
+		struct elf32_hdr e32;
+		struct elf64_hdr e64;
+	} *ehdr = _ehdr;
 	struct elf32_phdr *phdr32 = _phdr;
 	struct elf64_phdr *phdr64 = _phdr;
 	struct mips_elf_abiflags_v0 abiflags;
+	bool elf32;
+	u32 flags;
 	int ret;
 
+	elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+	flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
+
 	/* Lets see if this is an O32 ELF */
-	if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
-		if (ehdr32->e_flags & EF_MIPS_FP64) {
+	if (elf32) {
+		if (flags & EF_MIPS_FP64) {
 			/*
 			 * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
 			 * later if needed
@@ -120,13 +134,50 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
 	return 0;
 }
 
-int arch_check_elf(void *_ehdr, bool has_interpreter,
+int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
 		   struct arch_elf_state *state)
 {
-	struct elf32_hdr *ehdr = _ehdr;
+	union {
+		struct elf32_hdr e32;
+		struct elf64_hdr e64;
+	} *ehdr = _ehdr;
+	union {
+		struct elf32_hdr e32;
+		struct elf64_hdr e64;
+	} *iehdr = _interp_ehdr;
 	struct mode_req prog_req, interp_req;
 	int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
-	bool is_mips64;
+	bool elf32;
+	u32 flags;
+
+	elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+	flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
+
+	/*
+	 * Determine the NaN personality, reject the binary if not allowed.
+	 * Also ensure that any interpreter matches the executable.
+	 */
+	if (flags & EF_MIPS_NAN2008) {
+		if (mips_use_nan_2008)
+			state->nan_2008 = 1;
+		else
+			return -ENOEXEC;
+	} else {
+		if (mips_use_nan_legacy)
+			state->nan_2008 = 0;
+		else
+			return -ENOEXEC;
+	}
+	if (has_interpreter) {
+		bool ielf32;
+		u32 iflags;
+
+		ielf32 = iehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+		iflags = ielf32 ? iehdr->e32.e_flags : iehdr->e64.e_flags;
+
+		if ((flags ^ iflags) & EF_MIPS_NAN2008)
+			return -ELIBBAD;
+	}
 
 	if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
 		return 0;
@@ -142,21 +193,18 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
 		abi0 = abi1 = fp_abi;
 	}
 
-	is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ||
-		    (ehdr->e_flags & EF_MIPS_ABI2);
-
-	if (is_mips64) {
-		/* MIPS64 code always uses FR=1, thus the default is easy */
-		state->overall_fp_mode = FP_FR1;
-
-		/* Disallow access to the various FPXX & FP64 ABIs */
-		max_abi = MIPS_ABI_FP_SOFT;
-	} else {
+	if (elf32 && !(flags & EF_MIPS_ABI2)) {
 		/* Default to a mode capable of running code expecting FR=0 */
 		state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
 
 		/* Allow all ABIs we know about */
 		max_abi = MIPS_ABI_FP_64A;
+	} else {
+		/* MIPS64 code always uses FR=1, thus the default is easy */
+		state->overall_fp_mode = FP_FR1;
+
+		/* Disallow access to the various FPXX & FP64 ABIs */
+		max_abi = MIPS_ABI_FP_SOFT;
 	}
 
 	if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
@@ -254,3 +302,27 @@ void mips_set_personality_fp(struct arch_elf_state *state)
 		BUG();
 	}
 }
+
+/*
+ * Select the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode
+ * in FCSR according to the ELF NaN personality.
+ */
+void mips_set_personality_nan(struct arch_elf_state *state)
+{
+	struct cpuinfo_mips *c = &boot_cpu_data;
+	struct task_struct *t = current;
+
+	t->thread.fpu.fcr31 = c->fpu_csr31;
+	switch (state->nan_2008) {
+	case 0:
+		break;
+	case 1:
+		if (!(c->fpu_msk31 & FPU_CSR_NAN2008))
+			t->thread.fpu.fcr31 |= FPU_CSR_NAN2008;
+		if (!(c->fpu_msk31 & FPU_CSR_ABS2008))
+			t->thread.fpu.fcr31 |= FPU_CSR_ABS2008;
+		break;
+	default:
+		BUG();
+	}
+}
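With the elf.c changes above, the NaN personality of both the executable and its interpreter is read from the EF_MIPS_NAN2008 bit in e_flags, and a mismatch is rejected with -ELIBBAD. A rough userspace counterpart for inspecting a binary (a sketch that assumes host and target share byte order; 0x400 is used only as a fallback if <elf.h> lacks EF_MIPS_NAN2008):

    #include <elf.h>
    #include <stdio.h>

    #ifndef EF_MIPS_NAN2008
    #define EF_MIPS_NAN2008 0x00000400   /* assumed value if the header lacks it */
    #endif

    int main(int argc, char **argv)
    {
            union { Elf32_Ehdr e32; Elf64_Ehdr e64; } ehdr;
            unsigned int flags;
            FILE *f;

            if (argc != 2 || !(f = fopen(argv[1], "rb")))
                    return 1;
            /* e_flags sits within the first 52 bytes for both ELF classes. */
            if (fread(&ehdr, sizeof(ehdr.e32), 1, f) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);

            /* Same trick as the kernel code: pick e_flags based on EI_CLASS. */
            flags = (ehdr.e32.e_ident[EI_CLASS] == ELFCLASS32) ?
                    ehdr.e32.e_flags : ehdr.e64.e_flags;

            printf("%s: %s-NaN\n", argv[1],
                   (flags & EF_MIPS_NAN2008) ? "2008" : "legacy");
            return 0;
    }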
@@ -21,7 +21,7 @@ static struct txx9_pio_reg __iomem *txx9_pioptr;
 
 static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
 {
-	return __raw_readl(&txx9_pioptr->din) & (1 << offset);
+	return !!(__raw_readl(&txx9_pioptr->din) & (1 << offset));
 }
 
 static void txx9_gpio_set_raw(unsigned int offset, int value)
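The one-line gpio_txx9 change clamps the return value with the usual !! idiom, presumably because gpiochip .get() callers expect a plain 0 or 1 and the raw masked value for offset 31 does not even fit the positive range of int. A tiny sketch of the difference (illustration only; the driver itself writes the shift as 1 << offset):

    #include <stdint.h>
    #include <stdio.h>

    static int get_raw(uint32_t reg, unsigned int offset)
    {
            return reg & (1u << offset);      /* bit 31: typically converts to a negative int */
    }

    static int get_bool(uint32_t reg, unsigned int offset)
    {
            return !!(reg & (1u << offset));  /* always exactly 0 or 1 */
    }

    int main(void)
    {
            uint32_t din = 0x80000000u;       /* only line 31 is high */

            printf("raw:  %d\n", get_raw(din, 31));
            printf("bool: %d\n", get_bool(din, 31));
            return 0;
    }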
@@ -548,9 +548,6 @@ static const struct pt_regs_offset regoffset_table[] = {
 	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
 	REG_OFFSET_NAME(c0_cause, cp0_cause),
 	REG_OFFSET_NAME(c0_epc, cp0_epc),
-#ifdef CONFIG_MIPS_MT_SMTC
-	REG_OFFSET_NAME(c0_tcstatus, cp0_tcstatus),
-#endif
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	REG_OFFSET_NAME(mpl0, mpl[0]),
 	REG_OFFSET_NAME(mpl1, mpl[1]),
@@ -623,7 +623,7 @@ static void __init request_crashkernel(struct resource *res)
 
 #define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
 #define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
-#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_EXTEND)
+#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
 
 static void __init arch_mem_init(char **cmdline_p)
 {
@@ -202,6 +202,9 @@ static void boot_core(unsigned core)
 	/* Ensure its coherency is disabled */
 	write_gcr_co_coherence(0);
 
+	/* Start it with the legacy memory map and exception base */
+	write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);
+
 	/* Ensure the core can access the GCRs */
 	access = read_gcr_access();
 	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
@@ -17,34 +17,22 @@
 #include <asm/barrier.h>
 #include <asm/mipsregs.h>
 
-static atomic_t count_start_flag = ATOMIC_INIT(0);
-static unsigned int initcount = 0;
 static atomic_t count_count_start = ATOMIC_INIT(0);
 static atomic_t count_count_stop = ATOMIC_INIT(0);
-static atomic_t count_reference = ATOMIC_INIT(0);
 
 #define COUNTON 100
-#define NR_LOOPS 5
+#define NR_LOOPS 3
 
 void synchronise_count_master(int cpu)
 {
 	int i;
 	unsigned long flags;
+	unsigned int initcount;
 
 	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
 
 	local_irq_save(flags);
 
-	/*
-	 * Notify the slaves that it's time to start
-	 */
-	atomic_set(&count_reference, read_c0_count());
-	atomic_set(&count_start_flag, cpu);
-	smp_wmb();
-
-	/* Count will be initialised to current timer for all CPU's */
-	initcount = read_c0_count();
-
 	/*
 	 * We loop a few times to get a primed instruction cache,
 	 * then the last pass is more or less synchronised and
@@ -63,9 +51,13 @@ void synchronise_count_master(int cpu)
 		atomic_set(&count_count_stop, 0);
 		smp_wmb();
 
-		/* this lets the slaves write their count register */
+		/* Let the slave writes its count register */
 		atomic_inc(&count_count_start);
 
+		/* Count will be initialised to current timer */
+		if (i == 1)
+			initcount = read_c0_count();
+
 		/*
 		 * Everyone initialises count in the last loop:
 		 */
@@ -73,7 +65,7 @@ void synchronise_count_master(int cpu)
 			write_c0_count(initcount);
 
 		/*
-		 * Wait for all slaves to leave the synchronization point:
+		 * Wait for slave to leave the synchronization point:
 		 */
 		while (atomic_read(&count_count_stop) != 1)
 			mb();
@@ -83,7 +75,6 @@ void synchronise_count_master(int cpu)
 	}
 	/* Arrange for an interrupt in a short while */
 	write_c0_compare(read_c0_count() + COUNTON);
-	atomic_set(&count_start_flag, 0);
 
 	local_irq_restore(flags);
 
@@ -98,19 +89,12 @@ void synchronise_count_master(int cpu)
 void synchronise_count_slave(int cpu)
 {
 	int i;
+	unsigned int initcount;
 
 	/*
 	 * Not every cpu is online at the time this gets called,
 	 * so we first wait for the master to say everyone is ready
 	 */
-
-	while (atomic_read(&count_start_flag) != cpu)
-		mb();
-
-	/* Count will be initialised to next expire for all CPU's */
-	initcount = atomic_read(&count_reference);
 
 	for (i = 0; i < NR_LOOPS; i++) {
 		atomic_inc(&count_count_start);
 		while (atomic_read(&count_count_start) != 2)
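The sync-r4k rework above drops the count_start_flag/count_reference globals: the master and the single slave being brought online rendezvous once per loop iteration, the master samples the reference value inside the loop (on pass i == 1, once the instruction cache is primed), and both sides write it on the final pass. A loose userspace analogy, with a POSIX barrier standing in for the count_count_start/count_count_stop handshake (illustration only, not the kernel algorithm itself):

    #include <pthread.h>
    #include <stdio.h>

    #define NR_LOOPS 3

    static pthread_barrier_t barrier;
    static unsigned int initcount;         /* reference value, as in the patch */
    static unsigned int counter[2];        /* stands in for each CPU's cycle counter */

    static void *sync_counter(void *arg)
    {
            int id = *(int *)arg;
            int i;

            for (i = 0; i < NR_LOOPS; i++) {
                    pthread_barrier_wait(&barrier);   /* both sides line up */
                    if (id == 0 && i == 1)
                            initcount = 42;           /* "master" samples once, early */
                    if (i == NR_LOOPS - 1)
                            counter[id] = initcount;  /* everyone loads it on the last pass */
                    pthread_barrier_wait(&barrier);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t[2];
            int id[2] = { 0, 1 };

            pthread_barrier_init(&barrier, NULL, 2);
            pthread_create(&t[0], NULL, sync_counter, &id[0]);
            pthread_create(&t[1], NULL, sync_counter, &id[1]);
            pthread_join(t[0], NULL);
            pthread_join(t[1], NULL);
            printf("cpu0=%u cpu1=%u\n", counter[0], counter[1]);
            return 0;
    }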
@@ -2250,7 +2250,7 @@ void __init trap_init(void)
 	 * Only some CPUs have the watch exceptions.
 	 */
 	if (cpu_has_watch)
-		set_except_vector(23, handle_watch);
+		set_except_vector(EXCCODE_WATCH, handle_watch);
 
 	/*
 	 * Initialise interrupt handlers
@@ -2277,27 +2277,27 @@ void __init trap_init(void)
 	if (board_be_init)
 		board_be_init();
 
-	set_except_vector(0, using_rollback_handler() ? rollback_handle_int
-						      : handle_int);
-	set_except_vector(1, handle_tlbm);
-	set_except_vector(2, handle_tlbl);
-	set_except_vector(3, handle_tlbs);
+	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
+					rollback_handle_int : handle_int);
+	set_except_vector(EXCCODE_MOD, handle_tlbm);
+	set_except_vector(EXCCODE_TLBL, handle_tlbl);
+	set_except_vector(EXCCODE_TLBS, handle_tlbs);
 
-	set_except_vector(4, handle_adel);
-	set_except_vector(5, handle_ades);
+	set_except_vector(EXCCODE_ADEL, handle_adel);
+	set_except_vector(EXCCODE_ADES, handle_ades);
 
-	set_except_vector(6, handle_ibe);
-	set_except_vector(7, handle_dbe);
+	set_except_vector(EXCCODE_IBE, handle_ibe);
+	set_except_vector(EXCCODE_DBE, handle_dbe);
 
-	set_except_vector(8, handle_sys);
-	set_except_vector(9, handle_bp);
-	set_except_vector(10, rdhwr_noopt ? handle_ri :
+	set_except_vector(EXCCODE_SYS, handle_sys);
+	set_except_vector(EXCCODE_BP, handle_bp);
+	set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
 			  (cpu_has_vtag_icache ?
 			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
-	set_except_vector(11, handle_cpu);
-	set_except_vector(12, handle_ov);
-	set_except_vector(13, handle_tr);
-	set_except_vector(14, handle_msa_fpe);
+	set_except_vector(EXCCODE_CPU, handle_cpu);
+	set_except_vector(EXCCODE_OV, handle_ov);
+	set_except_vector(EXCCODE_TR, handle_tr);
+	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
 
 	if (current_cpu_type() == CPU_R6000 ||
 	    current_cpu_type() == CPU_R6000A) {
@@ -2318,25 +2318,25 @@ void __init trap_init(void)
 		board_nmi_handler_setup();
 
 	if (cpu_has_fpu && !cpu_has_nofpuex)
-		set_except_vector(15, handle_fpe);
+		set_except_vector(EXCCODE_FPE, handle_fpe);
 
-	set_except_vector(16, handle_ftlb);
+	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
 
 	if (cpu_has_rixiex) {
-		set_except_vector(19, tlb_do_page_fault_0);
-		set_except_vector(20, tlb_do_page_fault_0);
+		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
+		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
 	}
 
-	set_except_vector(21, handle_msa);
-	set_except_vector(22, handle_mdmx);
+	set_except_vector(EXCCODE_MSADIS, handle_msa);
+	set_except_vector(EXCCODE_MDMX, handle_mdmx);
 
 	if (cpu_has_mcheck)
-		set_except_vector(24, handle_mcheck);
+		set_except_vector(EXCCODE_MCHECK, handle_mcheck);
 
 	if (cpu_has_mipsmt)
-		set_except_vector(25, handle_mt);
+		set_except_vector(EXCCODE_THREAD, handle_mt);
 
-	set_except_vector(26, handle_dsp);
+	set_except_vector(EXCCODE_DSPDIS, handle_dsp);
 
 	if (board_cache_error_setup)
 		board_cache_error_setup();
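Read together, the check_daddi() and trap_init() hunks spell out the whole numeric-code-to-name mapping this series introduces. For reference, the Cause.ExcCode values implied by the one-for-one replacements above (a summary inferred from this diff, not a copy of the new asm/mipsregs.h):

    #define EXCCODE_INT          0   /* interrupt pending */
    #define EXCCODE_MOD          1   /* TLB modified */
    #define EXCCODE_TLBL         2   /* TLB miss on load or ifetch */
    #define EXCCODE_TLBS         3   /* TLB miss on store */
    #define EXCCODE_ADEL         4   /* address error on load or ifetch */
    #define EXCCODE_ADES         5   /* address error on store */
    #define EXCCODE_IBE          6   /* instruction bus error */
    #define EXCCODE_DBE          7   /* data bus error */
    #define EXCCODE_SYS          8   /* system call */
    #define EXCCODE_BP           9   /* breakpoint */
    #define EXCCODE_RI          10   /* reserved instruction */
    #define EXCCODE_CPU         11   /* coprocessor unusable */
    #define EXCCODE_OV          12   /* arithmetic overflow */
    #define EXCCODE_TR          13   /* trap instruction */
    #define EXCCODE_MSAFPE      14   /* MSA floating point exception */
    #define EXCCODE_FPE         15   /* floating point exception */
    #define MIPS_EXCCODE_TLBPAR 16   /* TLB parity error (FTLB) */
    #define EXCCODE_TLBRI       19   /* TLB read-inhibit */
    #define EXCCODE_TLBXI       20   /* TLB execute-inhibit */
    #define EXCCODE_MSADIS      21   /* MSA disabled */
    #define EXCCODE_MDMX        22   /* MDMX unusable */
    #define EXCCODE_WATCH       23   /* watchpoint reference */
    #define EXCCODE_MCHECK      24   /* machine check */
    #define EXCCODE_THREAD      25   /* MT thread exception */
    #define EXCCODE_DSPDIS      26   /* DSP disabled */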