Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (68 commits)
  [MIPS] remove Documentation/mips/GT64120.README
  [MIPS] Malta: remaining bits of the board support code cleanup
  [MIPS] Malta: make the helper function static
  [MIPS] Malta: fix braces at single statement blocks
  [MIPS] Malta, Atlas: move an extern function declaration to the header file
  [MIPS] Malta: Use C89 style for comments
  [MIPS] Malta: else should follow close brace in malta_int.c
  [MIPS] Malta: remove a superfluous comment
  [MIPS] Malta: include <linux/cpu.h> instead of <asm/cpu.h>
  [MIPS] Malta, Atlas, Sead: remove an extern from .c files
  [MIPS] Malta: fix oversized lines in malta_int.c
  [MIPS] Malta: remove a dead function declaration
  [MIPS] Malta: use tabs not spaces
  [MIPS] Malta: set up the screen info in a separate function
  [MIPS] Malta: check the PCI clock frequency in a separate function
  [MIPS] Malta: use the KERN_ facility level in printk()
  [MIPS] Malta: use Linux kernel style for structure initialization
  [MIPS]: constify function pointer tables
  [MIPS] compat: handle argument endianess of sys32_(f)truncate64 with merge_64
  [MIPS] Cobalt 64-bits kernels can be safely unmarked experimental
  ...
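For context on the sys32_(f)truncate64 entry above: on the 32-bit MIPS user ABI a 64-bit length is passed in two argument registers, and which register carries the high half depends on endianness. A minimal stand-alone illustration of how a merge_64()-style helper folds the halves back into one value follows; this is a sketch, not the kernel's exact macro, and the test values are made up for the example.

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified: takes the high and low 32-bit halves explicitly.  In the
 * kernel the argument order is chosen per endianness, because the ABI
 * delivers the halves in different registers on __MIPSEB__ vs __MIPSEL__.
 */
#define merge_64(hi, lo) \
	((((uint64_t)(hi) & 0xffffffffULL) << 32) | (uint32_t)(lo))

int main(void)
{
	uint32_t hi = 0x1, lo = 0x80000000u;	/* a 6 GiB length, split in two */
	printf("%llu\n", (unsigned long long)merge_64(hi, lo));
	return 0;
}

The linux32.c hunks further down drop the duplicated __MIPSEB__/__MIPSEL__ prototypes and simply pass merge_64(a2, a3) to sys_truncate()/sys_ftruncate().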
@@ -18,6 +18,15 @@
#include <asm/mipsregs.h>
#include <asm/system.h>

static char bug64hit[] __initdata =
	"reliable operation impossible!\n%s";
static char nowar[] __initdata =
	"Please report to <linux-mips@linux-mips.org>.";
static char r4kwar[] __initdata =
	"Enable CPU_R4000_WORKAROUNDS to rectify.";
static char daddiwar[] __initdata =
	"Enable CPU_DADDI_WORKAROUNDS to rectify.";

static inline void align_mod(const int align, const int mod)
{
	asm volatile(
@@ -155,13 +164,7 @@ static inline void check_mult_sh(void)
	}

	printk("no.\n");
	panic("Reliable operation impossible!\n"
#ifndef CONFIG_CPU_R4000
	      "Configure for R4000 to enable the workaround."
#else
	      "Please report to <linux-mips@linux-mips.org>."
#endif
	      );
	panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}

static volatile int daddi_ov __initdata = 0;
@@ -233,15 +236,11 @@ static inline void check_daddi(void)
	}

	printk("no.\n");
	panic("Reliable operation impossible!\n"
#if !defined(CONFIG_CPU_R4000) && !defined(CONFIG_CPU_R4400)
	      "Configure for R4000 or R4400 to enable the workaround."
#else
	      "Please report to <linux-mips@linux-mips.org>."
#endif
	      );
	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

int daddiu_bug __initdata = -1;

static inline void check_daddiu(void)
{
	long v, w, tmp;
@@ -281,7 +280,9 @@ static inline void check_daddiu(void)
	: "=&r" (v), "=&r" (w), "=&r" (tmp)
	: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	if (v == w) {
	daddiu_bug = v != w;

	if (!daddiu_bug) {
		printk("no.\n");
		return;
	}
@@ -303,18 +304,16 @@ static inline void check_daddiu(void)
	}

	printk("no.\n");
	panic("Reliable operation impossible!\n"
#if !defined(CONFIG_CPU_R4000) && !defined(CONFIG_CPU_R4400)
	      "Configure for R4000 or R4400 to enable the workaround."
#else
	      "Please report to <linux-mips@linux-mips.org>."
#endif
	      );
	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

void __init check_bugs64_early(void)
{
	check_mult_sh();
	check_daddiu();
}

void __init check_bugs64(void)
{
	check_mult_sh();
	check_daddi();
	check_daddiu();
}
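The cpu-bugs64.c hunks above replace the per-check #ifdef'd panic strings with shared __initdata messages and pick the hint at run time. A rough stand-alone illustration of that selection pattern follows; the R4000_WAR/DADDI_WAR values are placeholders here, standing in for the configuration-derived constants the kernel uses.

#include <stdio.h>

#define R4000_WAR 0	/* placeholder: 1 when CPU_R4000_WORKAROUNDS is enabled */
#define DADDI_WAR 0	/* placeholder: 1 when CPU_DADDI_WORKAROUNDS is enabled */

static const char bug64hit[] = "reliable operation impossible!\n%s";
static const char nowar[] = "Please report to <linux-mips@linux-mips.org>.";
static const char r4kwar[] = "Enable CPU_R4000_WORKAROUNDS to rectify.";
static const char daddiwar[] = "Enable CPU_DADDI_WORKAROUNDS to rectify.";

int main(void)
{
	/* What panic(bug64hit, ...) would format if a check tripped. */
	printf(bug64hit, !R4000_WAR ? r4kwar : nowar);
	printf("\n");
	printf(bug64hit, !DADDI_WAR ? daddiwar : nowar);
	printf("\n");
	return 0;
}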
@@ -188,6 +188,8 @@ static inline void check_wait(void)
	case CPU_AU1500:
	case CPU_AU1550:
	case CPU_AU1200:
	case CPU_AU1210:
	case CPU_AU1250:
		if (allow_au1k_wait)
			cpu_wait = au1k_wait;
		break;
@@ -733,6 +735,11 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c)
		break;
	case 4:
		c->cputype = CPU_AU1200;
		if (2 == (c->processor_id & 0xff))
			c->cputype = CPU_AU1250;
		break;
	case 5:
		c->cputype = CPU_AU1210;
		break;
	default:
		panic("Unknown Au Core!");
@@ -858,6 +865,8 @@ static __init const char *cpu_to_name(struct cpuinfo_mips *c)
	case CPU_AU1100: name = "Au1100"; break;
	case CPU_AU1550: name = "Au1550"; break;
	case CPU_AU1200: name = "Au1200"; break;
	case CPU_AU1210: name = "Au1210"; break;
	case CPU_AU1250: name = "Au1250"; break;
	case CPU_4KEC: name = "MIPS 4KEc"; break;
	case CPU_4KSC: name = "MIPS 4KSc"; break;
	case CPU_VR41XX: name = "NEC Vr41xx"; break;

@@ -6,7 +6,7 @@
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2002 Maciej W. Rozycki
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>

@@ -471,7 +471,13 @@ NESTED(nmi_handler, PT_SIZE, sp)
	jr	k0
	rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4	/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK

@@ -161,8 +161,7 @@ static unsigned int translate_open_flags(int flags)
	int i;
	unsigned int ret = 0;

	for (i = 0; i < (sizeof(open_flags_table) / sizeof(struct apsp_table));
			i++) {
	for (i = 0; i < ARRAY_SIZE(open_flags_table); i++) {
		if( (flags & open_flags_table[i].sp) ) {
			ret |= open_flags_table[i].ap;
		}

@@ -174,36 +174,16 @@ struct rlimit32 {
	int rlim_max;
};

#ifdef __MIPSEB__
asmlinkage long sys32_truncate64(const char __user * path, unsigned long __dummy,
	int length_hi, int length_lo)
#endif
#ifdef __MIPSEL__
asmlinkage long sys32_truncate64(const char __user * path, unsigned long __dummy,
	int length_lo, int length_hi)
#endif
asmlinkage long sys32_truncate64(const char __user * path,
	unsigned long __dummy, int a2, int a3)
{
	loff_t length;

	length = ((unsigned long) length_hi << 32) | (unsigned int) length_lo;

	return sys_truncate(path, length);
	return sys_truncate(path, merge_64(a2, a3));
}

#ifdef __MIPSEB__
asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy,
	int length_hi, int length_lo)
#endif
#ifdef __MIPSEL__
asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy,
	int length_lo, int length_hi)
#endif
	int a2, int a3)
{
	loff_t length;

	length = ((unsigned long) length_hi << 32) | (unsigned int) length_lo;

	return sys_ftruncate(fd, length);
	return sys_ftruncate(fd, merge_64(a2, a3));
}

static inline long

@@ -17,7 +17,6 @@
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>

@@ -1,28 +0,0 @@
/*
 * Copyright (C) 2006 IBM Corporation
 *
 * Implements device information for i8253 timer chip
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation
 */

#include <linux/platform_device.h>

static __init int add_pcspkr(void)
{
	struct platform_device *pd;
	int ret;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);

@@ -62,6 +62,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
	);
	seq_printf(m, "shadow register sets\t: %d\n",
		cpu_data[n].srsets);
	seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);

	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
		cpu_has_vce ? "%u" : "not available");
@@ -89,7 +90,7 @@ static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,

@@ -40,7 +40,6 @@
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/mips_mt.h>
#include <asm/system.h>
#include <asm/vpe.h>
#include <asm/rtlx.h>

@@ -8,7 +8,7 @@
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 2001, 2002 Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
@@ -24,10 +24,12 @@

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/system.h>

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -561,6 +563,7 @@ void __init setup_arch(char **cmdline_p)
	}
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
@@ -573,9 +576,7 @@ void __init setup_arch(char **cmdline_p)
	arch_mem_init(cmdline_p);

	resource_init();
#ifdef CONFIG_SMP
	plat_smp_setup();
#endif
}

static int __init fpu_disable(char *s)

@@ -22,6 +22,7 @@
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/smp.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
@@ -30,7 +31,6 @@
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
@@ -215,68 +215,67 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
		write_tc_c0_tchalt(TCHALT_H);
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPU's are in a sensible state before we boot any of the
 * secondarys
 */
void __init plat_smp_setup(void)
static void vsmp_send_ipi_single(int cpu, unsigned int action)
{
	unsigned int mvpconf0, ntc, tc, ncpu = 0;
	int i;
	unsigned long flags;
	int vpflags;

	local_irq_save(flags);

	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */

	switch (action) {
	case SMP_CALL_FUNCTION:
		i = C_SW1;
		break;

	case SMP_RESCHEDULE_YOURSELF:
	default:
		i = C_SW0;
		break;
	}

	/* 1:1 mapping of vpe and tc... */
	settc(cpu);
	write_vpe_c0_cause(read_vpe_c0_cause() | i);
	evpe(vpflags);

	local_irq_restore(flags);
}

static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, mask)
		vsmp_send_ipi_single(i, action);
}

static void __cpuinit vsmp_init_secondary(void)
{
	/* Enable per-cpu interrupts */

	/* This is Malta specific: IPI,performance and timer inetrrupts */
	write_c0_status((read_c0_status() & ~ST0_IM ) |
		(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
}

static void __cpuinit vsmp_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(0, mt_fpu_cpumask);
		cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
	if (!cpu_has_mipsmt)
		return;

	/* disable MT so we can configure */
	dvpe();
	dmt();

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	mvpconf0 = read_c0_mvpconf0();
	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;

	/* we'll always have more TC's than VPE's, so loop setting everything
	   to a sensible state */
	for (tc = 0; tc <= ntc; tc++) {
		settc(tc);

		smp_tc_init(tc, mvpconf0);
		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
	}

	/* Release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	/* We'll wait until starting the secondaries before starting MVPE */

	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
	local_irq_enable();
}

void __init plat_prepare_cpus(unsigned int max_cpus)
static void vsmp_cpus_done(void)
{
	mips_mt_set_cpuoptions();

	/* set up ipi interrupts */
	if (cpu_has_vint) {
		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
	}

	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;

	setup_irq(cpu_ipi_resched_irq, &irq_resched);
	setup_irq(cpu_ipi_call_irq, &irq_call);

	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
}

/*
@@ -287,7 +286,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 * (unsigned long)idle->thread_info the gp
 * assumes a 1:1 mapping of TC => VPE
 */
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
	struct thread_info *gp = task_thread_info(idle);
	dvpe();
@@ -321,57 +320,81 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
	evpe(EVPE_ENABLE);
}

void __cpuinit prom_init_secondary(void)
/*
 * Common setup before any secondaries are started
 * Make sure all CPU's are in a sensible state before we boot any of the
 * secondarys
 */
static void __init vsmp_smp_setup(void)
{
	/* Enable per-cpu interrupts */

	/* This is Malta specific: IPI,performance and timer inetrrupts */
	write_c0_status((read_c0_status() & ~ST0_IM ) |
		(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
}

void __cpuinit prom_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
	unsigned int mvpconf0, ntc, tc, ncpu = 0;
	unsigned int nvpe;

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(smp_processor_id(), mt_fpu_cpumask);
		cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
	if (!cpu_has_mipsmt)
		return;

	local_irq_enable();
}
	/* disable MT so we can configure */
	dvpe();
	dmt();

void prom_cpus_done(void)
{
}
	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

void core_send_ipi(int cpu, unsigned int action)
{
	int i;
	unsigned long flags;
	int vpflags;
	mvpconf0 = read_c0_mvpconf0();
	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;

	local_irq_save(flags);
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	smp_num_siblings = nvpe;

	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */
	/* we'll always have more TC's than VPE's, so loop setting everything
	   to a sensible state */
	for (tc = 0; tc <= ntc; tc++) {
		settc(tc);

	switch (action) {
	case SMP_CALL_FUNCTION:
		i = C_SW1;
		break;

	case SMP_RESCHEDULE_YOURSELF:
	default:
		i = C_SW0;
		break;
		smp_tc_init(tc, mvpconf0);
		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
	}

	/* 1:1 mapping of vpe and tc... */
	settc(cpu);
	write_vpe_c0_cause(read_vpe_c0_cause() | i);
	evpe(vpflags);
	/* Release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	local_irq_restore(flags);
	/* We'll wait until starting the secondaries before starting MVPE */

	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
}

static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
	mips_mt_set_cpuoptions();

	/* set up ipi interrupts */
	if (cpu_has_vint) {
		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
	}

	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;

	setup_irq(cpu_ipi_resched_irq, &irq_resched);
	setup_irq(cpu_ipi_call_irq, &irq_call);

	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
}

struct plat_smp_ops vsmp_smp_ops = {
	.send_ipi_single = vsmp_send_ipi_single,
	.send_ipi_mask = vsmp_send_ipi_mask,
	.init_secondary = vsmp_init_secondary,
	.smp_finish = vsmp_smp_finish,
	.cpus_done = vsmp_cpus_done,
	.boot_secondary = vsmp_boot_secondary,
	.smp_setup = vsmp_smp_setup,
	.prepare_cpus = vsmp_prepare_cpus,
};
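With the callbacks collected into vsmp_smp_ops above, a platform only has to hand the structure to the generic SMP code, which then dispatches through mp_ops (see the smp.c hunks that follow, where the prom_* calls become mp_ops->... calls). A hedged sketch of the registration step, assuming it happens in the board's early setup path:

	/* illustrative only; the exact call site is platform code */
	register_smp_ops(&vsmp_smp_ops);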
@@ -37,7 +37,6 @@
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
@@ -56,6 +55,44 @@ EXPORT_SYMBOL(cpu_online_map);
extern void __init calibrate_delay(void);
extern void cpu_idle(void);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;

__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (ops)
		printk(KERN_WARNING "Overriding previous set SMP ops\n");

	mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
@@ -72,7 +109,7 @@ asmlinkage __cpuinit void start_secondary(void)
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	prom_init_secondary();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
@@ -84,7 +121,8 @@ asmlinkage __cpuinit void start_secondary(void)
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();
	mp_ops->smp_finish();
	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

@@ -155,7 +193,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
@@ -249,7 +287,7 @@ void smp_send_stop(void)

void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
	mp_ops->cpus_done();
}

/* called from main before smp_init() */
@@ -257,7 +295,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	plat_prepare_cpus(max_cpus);
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
@@ -295,7 +334,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
	if (IS_ERR(idle))
		panic(KERN_ERR "Fork failed for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);
	mp_ops->boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...

@@ -14,7 +14,6 @@
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>

@@ -16,7 +16,6 @@
#include <asm/hazards.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

@@ -50,8 +50,6 @@ int update_persistent_clock(struct timespec now)
	return rtc_mips_set_mmss(now.tv_sec);
}

int (*mips_timer_state)(void);

int null_perf_irq(void)
{
	return 0;

@@ -53,7 +53,6 @@
#include <asm/system.h>
#include <asm/vpe.h>
#include <asm/kspd.h>
#include <asm/mips_mt.h>

typedef void *vpe_handle;