Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS updates from Ralf Baechle:
 - three fixes for 3.15 that didn't make it in time
 - limited Octeon 3 support
 - paravirtualization support
 - improvements to platform support for Netlogic SoCs
 - add support for powering down the Malta eval board in software
 - add many instructions to the in-kernel microassembler
 - add support for the BPF JIT
 - minor cleanups of the BCM47xx code
 - large cleanup of the math emulation code, resulting in a significant
   code size reduction, better readability of the code and more accurate
   emulation
 - improvements to the MIPS CPS code
 - support the C3 power state for the R4k count/compare clock event device
 - improvements to the GIO support for older SGI workstations
 - increase the number of supported CPUs to 256; this can be reached on
   certain embedded multithreaded ccNUMA configurations
 - various small cleanups, updates and fixes

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (173 commits)
  MIPS: IP22/IP28: Improve GIO support
  MIPS: Octeon: Add twsi interrupt initialization for OCTEON 3XXX, 5XXX, 63XX
  DEC: Document the R4k MB ASIC mini interrupt controller
  DEC: Add self as the maintainer
  MIPS: Add microMIPS MSA support.
  MIPS: Replace calls to obsolete strict_strto call with kstrto* equivalents.
  MIPS: Replace obsolete strict_strto call with kstrto
  MIPS: BFP: Simplify code slightly.
  MIPS: Call find_vma with the mmap_sem held
  MIPS: Fix 'write_msa_##' inline macro.
  MIPS: Fix MSA toolchain support detection.
  mips: Update the email address of Geert Uytterhoeven
  MIPS: Add minimal defconfig for mips_paravirt
  MIPS: Enable build for new system 'paravirt'
  MIPS: paravirt: Add pci controller for virtio
  MIPS: Add code for new system 'paravirt'
  MIPS: Add functions for hypervisor call
  MIPS: OCTEON: Add OCTEON3 to __get_cpu_type
  MIPS: Add function get_ebase_cpunum
  MIPS: Add minimal support for OCTEON3 to c-r4k.c
  ...
Commit by Linus Torvalds, 2014-06-09 18:10:34 -07:00
256 changed files with 8,553 additions and 7,896 deletions

View file

@@ -17,7 +17,6 @@ endif
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
@@ -42,7 +41,7 @@ obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o
@@ -50,7 +49,6 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
@@ -107,6 +105,9 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_MIPS_CM) += mips-cm.o
obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
obj-$(CONFIG_CPU_PM) += pm.o
obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
#
# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
# safe to unconditionally use the assembler -mdsp / -mdspr2 switches

View file

@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/kbuild.h>
#include <linux/suspend.h>
#include <asm/pm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/smp-cps.h>
@@ -64,9 +65,6 @@ void output_ptreg_defines(void)
OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
OFFSET(PT_STATUS, pt_regs, cp0_status);
OFFSET(PT_CAUSE, pt_regs, cp0_cause);
#ifdef CONFIG_MIPS_MT_SMTC
OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
OFFSET(PT_MPL, pt_regs, mpl);
OFFSET(PT_MTP, pt_regs, mtp);
@@ -404,6 +402,20 @@ void output_pbe_defines(void)
}
#endif
#ifdef CONFIG_CPU_PM
void output_pm_defines(void)
{
COMMENT(" PM offsets. ");
#ifdef CONFIG_EVA
OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
#endif
OFFSET(SSS_SP, mips_static_suspend_state, sp);
BLANK();
}
#endif
void output_kvm_defines(void)
{
COMMENT(" KVM/MIPS Specfic offsets. ");
@@ -472,10 +484,14 @@ void output_kvm_defines(void)
void output_cps_defines(void)
{
COMMENT(" MIPS CPS offsets. ");
OFFSET(BOOTCFG_CORE, boot_config, core);
OFFSET(BOOTCFG_VPE, boot_config, vpe);
OFFSET(BOOTCFG_PC, boot_config, pc);
OFFSET(BOOTCFG_SP, boot_config, sp);
OFFSET(BOOTCFG_GP, boot_config, gp);
OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
}
#endif
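
These OFFSET()/DEFINE() entries come from the kernel's asm-offsets mechanism, which the new PM and CPS blocks above extend. A minimal sketch of how it works, assuming the macro definitions in include/linux/kbuild.h of this era (the struct is trimmed to the fields used above):

/*
 * Sketch of the kbuild offset-generation trick (illustrative). Each
 * DEFINE() makes the compiler emit a "->SYM value" marker into the
 * generated assembly; a build-time script turns those markers into
 * #define lines that assembly such as cps-vec.S can then include.
 */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

struct vpe_boot_config {	/* assumed layout, per the offsets above */
	unsigned long pc;
	unsigned long sp;
	unsigned long gp;
};

void sketch_output_cps_defines(void)
{
	OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
	OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
	OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
}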

View file

@@ -48,6 +48,202 @@ int __isa_exception_epc(struct pt_regs *regs)
return epc;
}
/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
int bc_false = 0;
unsigned int fcr31;
unsigned int bit;
if (!cpu_has_mmips)
return 0;
switch (insn.mm_i_format.opcode) {
case mm_pool32a_op:
if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
mm_pool32axf_op) {
switch (insn.mm_i_format.simmediate >>
MM_POOL32A_MINOR_SHIFT) {
case mm_jalr_op:
case mm_jalrhb_op:
case mm_jalrs_op:
case mm_jalrshb_op:
if (insn.mm_i_format.rt != 0) /* Not mm_jr */
regs->regs[insn.mm_i_format.rt] =
regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
*contpc = regs->regs[insn.mm_i_format.rs];
return 1;
}
}
break;
case mm_pool32i_op:
switch (insn.mm_i_format.rt) {
case mm_bltzals_op:
case mm_bltzal_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
/* Fall through */
case mm_bltz_op:
if ((long)regs->regs[insn.mm_i_format.rs] < 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bgezals_op:
case mm_bgezal_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
/* Fall through */
case mm_bgez_op:
if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_blez_op:
if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bgtz_op:
if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bc2f_op:
case mm_bc1f_op:
bc_false = 1;
/* Fall through */
case mm_bc2t_op:
case mm_bc1t_op:
preempt_disable();
if (is_fpu_owner())
asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
if (bc_false)
fcr31 = ~fcr31;
bit = (insn.mm_i_format.rs >> 2);
bit += (bit != 0);
bit += 23;
if (fcr31 & (1 << bit))
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
}
break;
case mm_pool16c_op:
switch (insn.mm_i_format.rt) {
case mm_jalr16_op:
case mm_jalrs16_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
/* Fall through */
case mm_jr16_op:
*contpc = regs->regs[insn.mm_i_format.rs];
return 1;
}
break;
case mm_beqz16_op:
if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_b1_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_bnez16_op:
if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_b1_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_b16_op:
*contpc = regs->cp0_epc + dec_insn.pc_inc +
(insn.mm_b0_format.simmediate << 1);
return 1;
case mm_beq32_op:
if (regs->regs[insn.mm_i_format.rs] ==
regs->regs[insn.mm_i_format.rt])
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bne32_op:
if (regs->regs[insn.mm_i_format.rs] !=
regs->regs[insn.mm_i_format.rt])
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_jalx32_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
*contpc = regs->cp0_epc + dec_insn.pc_inc;
*contpc >>= 28;
*contpc <<= 28;
*contpc |= (insn.j_format.target << 2);
return 1;
case mm_jals32_op:
case mm_jal32_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
/* Fall through */
case mm_j32_op:
*contpc = regs->cp0_epc + dec_insn.pc_inc;
*contpc >>= 27;
*contpc <<= 27;
*contpc |= (insn.j_format.target << 1);
set_isa16_mode(*contpc);
return 1;
}
return 0;
}
/*
* Compute return address and emulate branch in microMIPS mode after an
* exception only. It does not handle compact branches/jumps and cannot
@@ -366,7 +562,11 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case cop1_op:
preempt_disable();
if (is_fpu_owner())
asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
asm volatile(
".set push\n"
"\t.set mips1\n"
"\tcfc1\t%0,$31\n"
"\t.set pop" : "=r" (fcr31));
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
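
Every branch case in the new microMIPS decoder above follows the same arithmetic: a taken branch continues at EPC plus the size of the branch itself (pc_inc) plus a signed offset counted in halfwords (hence the << 1), while a not-taken branch merely steps over the next instruction (next_pc_inc). A sketch of that pattern (the helper name is ours, not the kernel's):

/* Illustrative only -- the kernel open-codes this per opcode above. */
static unsigned long mm_branch_target(unsigned long epc,
				      unsigned long pc_inc,
				      long simmediate, int taken,
				      unsigned long next_pc_inc)
{
	if (taken)
		return epc + pc_inc + (simmediate << 1);
	return epc + pc_inc + next_pc_inc;
}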

View file

@@ -26,7 +26,7 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
cnt = gic_read_count();
cnt += (u64)delta;
gic_write_compare(cnt);
gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
@@ -73,7 +73,8 @@ int gic_clockevent_init(void)
cd = &per_cpu(gic_clockevent_device, cpu);
cd->name = "MIPS GIC";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP;
clockevent_set_clock(cd, gic_frequency);

View file

@@ -12,17 +12,10 @@
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
#include <asm/gic.h>
/*
* The SMTC Kernel for the 34K, 1004K, et. al. replaces several
* of these routines with SMTC-specific variants.
*/
#ifndef CONFIG_MIPS_MT_SMTC
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta,
return res;
}
#endif /* CONFIG_MIPS_MT_SMTC */
void mips_set_clock_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
@@ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode,
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;
#ifndef CONFIG_MIPS_MT_SMTC
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2;
@@ -72,9 +62,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
/* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu);
#ifdef CONFIG_CEVT_GIC
if (!gic_present)
#endif
cd->event_handler(cd);
}
@@ -82,8 +69,6 @@ out:
return IRQ_HANDLED;
}
#endif /* Not CONFIG_MIPS_MT_SMTC */
struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
.flags = IRQF_PERCPU | IRQF_TIMER,
@@ -170,7 +155,6 @@ int c0_compare_int_usable(void)
return 1;
}
#ifndef CONFIG_MIPS_MT_SMTC
int r4k_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
@@ -195,7 +179,9 @@ int r4k_clockevent_init(void)
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
clockevent_set_clock(cd, mips_hpt_frequency);
@@ -210,9 +196,6 @@ int r4k_clockevent_init(void)
cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
#ifdef CONFIG_CEVT_GIC
if (!gic_present)
#endif
clockevents_register_device(cd);
if (cp0_timer_irq_installed)
@@ -225,4 +208,3 @@ int r4k_clockevent_init(void)
return 0;
}
#endif /* Not CONFIG_MIPS_MT_SMTC */
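
The newly added CLOCK_EVT_FEAT_C3STOP flag (the "C3 power state" item in the summary) declares that this timer may stop while the CPU sits in a deep idle state, such as the CPS power-gated state added later in this series, so the clockevents core must arm a broadcast device around such periods; CLOCK_EVT_FEAT_PERCPU marks the device as per-CPU. A hedged sketch of an equivalent registration, using the generic helper rather than the driver's open-coded mult/shift setup:

/* Illustrative registration; mirrors r4k_clockevent_init above. */
static void sketch_register(struct clock_event_device *cd, unsigned cpu,
			    unsigned long freq)
{
	cd->name = "MIPS";
	cd->features = CLOCK_EVT_FEAT_ONESHOT |
		       CLOCK_EVT_FEAT_C3STOP |
		       CLOCK_EVT_FEAT_PERCPU;
	cd->rating = 300;
	cd->cpumask = cpumask_of(cpu);
	/* min delta 0x300 ticks, max 0x7fffffff, as in the driver */
	clockevents_config_and_register(cd, freq, 0x300, 0x7fffffff);
}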

View file

@@ -1,324 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
/*
* Variant clock event timer support for SMTC on MIPS 34K, 1004K
* or other MIPS MT cores.
*
* Notes on SMTC Support:
*
* SMTC has multiple microthread TCs pretending to be Linux CPUs.
* But there's only one Count/Compare pair per VPE, and Compare
* interrupts are taken opportunistically by available TCs
* bound to the VPE with the Count register. The new timer
* framework provides for global broadcasts, but we really
* want VPE-level multicasts for best behavior. So instead
* of invoking the high-level clock-event broadcast code,
* this version of SMTC support uses the historical SMTC
* multicast mechanisms "under the hood", appearing to the
* generic clock layer as if the interrupts are per-CPU.
*
* The approach taken here is to maintain a set of NR_CPUS
* virtual timers, and track which "CPU" needs to be alerted
* at each event.
*
* It's unlikely that we'll see a MIPS MT core with more than
* 2 VPEs, but we *know* that we won't need to handle more
* VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
* is always going to be overkill, but always going to be enough.
*/
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];
/*
* Timestamps stored are absolute values to be programmed
* into Count register. Valid timestamps will never be zero.
* If a Zero Count value is actually calculated, it is converted
* to be a 1, which will introduce one or two CPU cycles of error
* roughly once every four billion events, which at 1000 HZ means
* about once every 50 days. If that's actually a problem, one
* could alternate squashing 0 to 1 and to -1.
*/
#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x) ((x) != 0L)
/*
* Time comparison is subtle, as it's really truncated
* modular arithmetic.
*/
#define IS_SOONER(a, b, reference) \
(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
/*
* CATCHUP_INCREMENT, used when the function falls behind the counter.
* Could be an increasing function instead of a constant;
*/
#define CATCHUP_INCREMENT 64
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
{
unsigned long flags;
unsigned int mtflags;
unsigned long timestamp, reference, previous;
unsigned long nextcomp = 0L;
int vpe = current_cpu_data.vpe_id;
int cpu = smp_processor_id();
local_irq_save(flags);
mtflags = dmt();
/*
* Maintain the per-TC virtual timer
* and program the per-VPE shared Count register
* as appropriate here...
*/
reference = (unsigned long)read_c0_count();
timestamp = MAKEVALID(reference + delta);
/*
* To really model the clock, we have to catch the case
* where the current next-in-VPE timestamp is the old
* timestamp for the calling CPU, but the new value is
* in fact later. In that case, we have to do a full
* scan and discover the new next-in-VPE CPU id and
* timestamp.
*/
previous = smtc_nexttime[vpe][cpu];
if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
&& IS_SOONER(previous, timestamp, reference)) {
int i;
int soonest = cpu;
/*
* Update timestamp array here, so that new
* value gets considered along with those of
* other virtual CPUs on the VPE.
*/
smtc_nexttime[vpe][cpu] = timestamp;
for_each_online_cpu(i) {
if (ISVALID(smtc_nexttime[vpe][i])
&& IS_SOONER(smtc_nexttime[vpe][i],
smtc_nexttime[vpe][soonest], reference)) {
soonest = i;
}
}
smtc_nextinvpe[vpe] = soonest;
nextcomp = smtc_nexttime[vpe][soonest];
/*
* Otherwise, we don't have to process the whole array rank,
* we just have to see if the event horizon has gotten closer.
*/
} else {
if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
IS_SOONER(timestamp,
smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
smtc_nextinvpe[vpe] = cpu;
nextcomp = timestamp;
}
/*
* Since next-in-VPE may be the same as the executing
* virtual CPU, we update the array *after* checking
* its value.
*/
smtc_nexttime[vpe][cpu] = timestamp;
}
/*
* It may be that, in fact, we don't need to update Compare,
* but if we do, we want to make sure we didn't fall into
* a crack just behind Count.
*/
if (ISVALID(nextcomp)) {
write_c0_compare(nextcomp);
ehb();
/*
* We never return an error, we just make sure
* that we trigger the handlers as quickly as
* we can if we fell behind.
*/
while ((nextcomp - (unsigned long)read_c0_count())
> (unsigned long)LONG_MAX) {
nextcomp += CATCHUP_INCREMENT;
write_c0_compare(nextcomp);
ehb();
}
}
emt(mtflags);
local_irq_restore(flags);
return 0;
}
void smtc_distribute_timer(int vpe)
{
unsigned long flags;
unsigned int mtflags;
int cpu;
struct clock_event_device *cd;
unsigned long nextstamp;
unsigned long reference;
repeat:
nextstamp = 0L;
for_each_online_cpu(cpu) {
/*
* Find virtual CPUs within the current VPE who have
* unserviced timer requests whose time is now past.
*/
local_irq_save(flags);
mtflags = dmt();
if (cpu_data[cpu].vpe_id == vpe &&
ISVALID(smtc_nexttime[vpe][cpu])) {
reference = (unsigned long)read_c0_count();
if ((smtc_nexttime[vpe][cpu] - reference)
> (unsigned long)LONG_MAX) {
smtc_nexttime[vpe][cpu] = 0L;
emt(mtflags);
local_irq_restore(flags);
/*
* We don't send IPIs to ourself.
*/
if (cpu != smp_processor_id()) {
smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
} else {
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
}
} else {
/* Local to VPE but Valid Time not yet reached. */
if (!ISVALID(nextstamp) ||
IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
reference)) {
smtc_nextinvpe[vpe] = cpu;
nextstamp = smtc_nexttime[vpe][cpu];
}
emt(mtflags);
local_irq_restore(flags);
}
} else {
emt(mtflags);
local_irq_restore(flags);
}
}
/* Reprogram for interrupt at next soonest timestamp for VPE */
if (ISVALID(nextstamp)) {
write_c0_compare(nextstamp);
ehb();
if ((nextstamp - (unsigned long)read_c0_count())
> (unsigned long)LONG_MAX)
goto repeat;
}
}
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
int cpu = smp_processor_id();
/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
handle_perf_irq(1);
if (read_c0_cause() & (1 << 30)) {
/* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare());
smtc_distribute_timer(cpu_data[cpu].vpe_id);
}
return IRQ_HANDLED;
}
int smtc_clockevent_init(void)
{
uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq;
int i;
int j;
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
if (cpu == 0) {
for (i = 0; i < num_possible_cpus(); i++) {
smtc_nextinvpe[i] = 0;
for (j = 0; j < num_possible_cpus(); j++)
smtc_nexttime[i][j] = 0L;
}
/*
* SMTC also can't have the usability test
* run by secondary TCs once Compare is in use.
*/
if (!c0_compare_int_usable())
return -ENXIO;
}
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
* interrupt number of its liking.
*/
irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
if (get_c0_compare_int)
irq = get_c0_compare_int();
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
/* Calculate the min / max delta */
cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
cd->shift = 32;
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 300;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = mips_next_event;
cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
/*
* On SMTC we only want to do the data structure
* initialization and IRQ setup once.
*/
if (cpu)
return 0;
/*
* And we need the hwmask associated with the c0_compare
* vector to be initialized.
*/
irq_hwmask[irq] = (0x100 << cp0_compare_irq);
if (cp0_timer_irq_installed)
return 0;
cp0_timer_irq_installed = 1;
setup_irq(irq, &c0_compare_irqaction);
return 0;
}
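
The deleted file's IS_SOONER() macro deserves a note, because the "truncated modular arithmetic" it relies on is easy to misread: subtracting the reference point first makes the comparison immune to Count-register wraparound. A self-contained demonstration (plain C; uint32_t stands in for the 32-bit Count register, which the kernel macro handled via unsigned long on these parts):

#include <stdio.h>
#include <stdint.h>

#define IS_SOONER(a, b, reference) \
	((uint32_t)((a) - (reference)) < (uint32_t)((b) - (reference)))

int main(void)
{
	uint32_t ref = 0xfffffff0u;	/* Count is about to wrap */
	uint32_t a   = 0x00000010u;	/* fires 0x20 cycles after ref */
	uint32_t b   = 0xfffffff8u;	/* fires 0x08 cycles after ref */

	/* The naive test claims a is sooner; the modular test sees that
	 * a is 0x20 cycles away while b is only 0x08 cycles away. */
	printf("naive:   %d\n", a < b);			/* 1 (wrong) */
	printf("modular: %d\n", IS_SOONER(a, b, ref));	/* 0 (right) */
	return 0;
}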

View file

@@ -14,19 +14,43 @@
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#define GCR_CL_COHERENCE_OFS 0x2008
#define GCR_CL_COHERENCE_OFS 0x2008
#define GCR_CL_ID_OFS 0x2028
.extern mips_cm_base
.set noreorder
/*
* Set dest to non-zero if the core supports the MT ASE, else zero. If
* MT is not supported then branch to nomt.
*/
.macro has_mt dest, nomt
mfc0 \dest, CP0_CONFIG
bgez \dest, \nomt
mfc0 \dest, CP0_CONFIG, 1
bgez \dest, \nomt
mfc0 \dest, CP0_CONFIG, 2
bgez \dest, \nomt
mfc0 \dest, CP0_CONFIG, 3
andi \dest, \dest, MIPS_CONF3_MT
beqz \dest, \nomt
.endm
.section .text.cps-vec
.balign 0x1000
.set noreorder
LEAF(mips_cps_core_entry)
/*
* These first 8 bytes will be patched by cps_smp_setup to load the
* base address of the CM GCRs into register v1.
* These first 12 bytes will be patched by cps_smp_setup to load the
* base address of the CM GCRs into register v1 and the CCA to use into
* register s0.
*/
.quad 0
.word 0
/* Check whether we're here due to an NMI */
mfc0 k0, CP0_STATUS
@@ -117,10 +141,11 @@ icache_done:
add a0, a0, t0
dcache_done:
/* Set Kseg0 cacheable, coherent, write-back, write-allocate */
/* Set Kseg0 CCA to that in s0 */
mfc0 t0, CP0_CONFIG
ori t0, 0x7
xori t0, 0x2
xori t0, 0x7
or t0, t0, s0
mtc0 t0, CP0_CONFIG
ehb
@@ -134,21 +159,24 @@ dcache_done:
jr t0
nop
1: /* We're up, cached & coherent */
/*
* We're up, cached & coherent. Perform any further required core-level
* initialisation.
*/
1: jal mips_cps_core_init
nop
/*
* TODO: We should check the VPE number we intended to boot here, and
* if non-zero we should start that VPE and stop this one. For
* the moment this doesn't matter since CPUs are brought up
* sequentially and in order, but once hotplug is implemented
* this will need revisiting.
* Boot any other VPEs within this core that should be online, and
* deactivate this VPE if it should be offline.
*/
jal mips_cps_boot_vpes
nop
/* Off we go! */
la t0, mips_cps_bootcfg
lw t1, BOOTCFG_PC(t0)
lw gp, BOOTCFG_GP(t0)
lw sp, BOOTCFG_SP(t0)
lw t1, VPEBOOTCFG_PC(v0)
lw gp, VPEBOOTCFG_GP(v0)
lw sp, VPEBOOTCFG_SP(v0)
jr t1
nop
END(mips_cps_core_entry)
@@ -189,3 +217,271 @@ LEAF(excep_ejtag)
jr k0
nop
END(excep_ejtag)
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT
/* Check that the core implements the MT ASE */
has_mt t0, 3f
nop
.set push
.set mt
/* Only allow 1 TC per VPE to execute... */
dmt
/* ...and for the moment only 1 VPE */
dvpe
la t1, 1f
jr.hb t1
nop
/* Enter VPE configuration state */
1: mfc0 t0, CP0_MVPCONTROL
ori t0, t0, MVPCONTROL_VPC
mtc0 t0, CP0_MVPCONTROL
/* Retrieve the number of VPEs within the core */
mfc0 t0, CP0_MVPCONF0
srl t0, t0, MVPCONF0_PVPE_SHIFT
andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
addi t7, t0, 1
/* If there's only 1, we're done */
beqz t0, 2f
nop
/* Loop through each VPE within this core */
li t5, 1
1: /* Operate on the appropriate TC */
mtc0 t5, CP0_VPECONTROL
ehb
/* Bind TC to VPE (1:1 TC:VPE mapping) */
mttc0 t5, CP0_TCBIND
/* Set exclusive TC, non-active, master */
li t0, VPECONF0_MVP
sll t1, t5, VPECONF0_XTC_SHIFT
or t0, t0, t1
mttc0 t0, CP0_VPECONF0
/* Set TC non-active, non-allocatable */
mttc0 zero, CP0_TCSTATUS
/* Set TC halted */
li t0, TCHALT_H
mttc0 t0, CP0_TCHALT
/* Next VPE */
addi t5, t5, 1
slt t0, t5, t7
bnez t0, 1b
nop
/* Leave VPE configuration state */
2: mfc0 t0, CP0_MVPCONTROL
xori t0, t0, MVPCONTROL_VPC
mtc0 t0, CP0_MVPCONTROL
3: .set pop
#endif
jr ra
nop
END(mips_cps_core_init)
LEAF(mips_cps_boot_vpes)
/* Retrieve CM base address */
la t0, mips_cm_base
lw t0, 0(t0)
/* Calculate a pointer to this core's struct core_boot_config */
lw t0, GCR_CL_ID_OFS(t0)
li t1, COREBOOTCFG_SIZE
mul t0, t0, t1
la t1, mips_cps_core_bootcfg
lw t1, 0(t1)
addu t0, t0, t1
/* Calculate this VPE's ID. If the core doesn't support MT use 0 */
has_mt t6, 1f
li t9, 0
/* Find the number of VPEs present in the core */
mfc0 t1, CP0_MVPCONF0
srl t1, t1, MVPCONF0_PVPE_SHIFT
andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
addi t1, t1, 1
/* Calculate a mask for the VPE ID from EBase.CPUNum */
clz t1, t1
li t2, 31
subu t1, t2, t1
li t2, 1
sll t1, t2, t1
addiu t1, t1, -1
/* Retrieve the VPE ID from EBase.CPUNum */
mfc0 t9, $15, 1
and t9, t9, t1
1: /* Calculate a pointer to this VPE's struct vpe_boot_config */
li t1, VPEBOOTCFG_SIZE
mul v0, t9, t1
lw t7, COREBOOTCFG_VPECONFIG(t0)
addu v0, v0, t7
#ifdef CONFIG_MIPS_MT
/* If the core doesn't support MT then return */
bnez t6, 1f
nop
jr ra
nop
.set push
.set mt
1: /* Enter VPE configuration state */
dvpe
la t1, 1f
jr.hb t1
nop
1: mfc0 t1, CP0_MVPCONTROL
ori t1, t1, MVPCONTROL_VPC
mtc0 t1, CP0_MVPCONTROL
ehb
/* Loop through each VPE */
lw t6, COREBOOTCFG_VPEMASK(t0)
move t8, t6
li t5, 0
/* Check whether the VPE should be running. If not, skip it */
1: andi t0, t6, 1
beqz t0, 2f
nop
/* Operate on the appropriate TC */
mfc0 t0, CP0_VPECONTROL
ori t0, t0, VPECONTROL_TARGTC
xori t0, t0, VPECONTROL_TARGTC
or t0, t0, t5
mtc0 t0, CP0_VPECONTROL
ehb
/* Skip the VPE if its TC is not halted */
mftc0 t0, CP0_TCHALT
beqz t0, 2f
nop
/* Calculate a pointer to the VPE's struct vpe_boot_config */
li t0, VPEBOOTCFG_SIZE
mul t0, t0, t5
addu t0, t0, t7
/* Set the TC restart PC */
lw t1, VPEBOOTCFG_PC(t0)
mttc0 t1, CP0_TCRESTART
/* Set the TC stack pointer */
lw t1, VPEBOOTCFG_SP(t0)
mttgpr t1, sp
/* Set the TC global pointer */
lw t1, VPEBOOTCFG_GP(t0)
mttgpr t1, gp
/* Copy config from this VPE */
mfc0 t0, CP0_CONFIG
mttc0 t0, CP0_CONFIG
/* Ensure no software interrupts are pending */
mttc0 zero, CP0_CAUSE
mttc0 zero, CP0_STATUS
/* Set TC active, not interrupt exempt */
mftc0 t0, CP0_TCSTATUS
li t1, ~TCSTATUS_IXMT
and t0, t0, t1
ori t0, t0, TCSTATUS_A
mttc0 t0, CP0_TCSTATUS
/* Clear the TC halt bit */
mttc0 zero, CP0_TCHALT
/* Set VPE active */
mftc0 t0, CP0_VPECONF0
ori t0, t0, VPECONF0_VPA
mttc0 t0, CP0_VPECONF0
/* Next VPE */
2: srl t6, t6, 1
addi t5, t5, 1
bnez t6, 1b
nop
/* Leave VPE configuration state */
mfc0 t1, CP0_MVPCONTROL
xori t1, t1, MVPCONTROL_VPC
mtc0 t1, CP0_MVPCONTROL
ehb
evpe
/* Check whether this VPE is meant to be running */
li t0, 1
sll t0, t0, t9
and t0, t0, t8
bnez t0, 2f
nop
/* This VPE should be offline, halt the TC */
li t0, TCHALT_H
mtc0 t0, CP0_TCHALT
la t0, 1f
1: jr.hb t0
nop
2: .set pop
#endif /* CONFIG_MIPS_MT */
/* Return */
jr ra
nop
END(mips_cps_boot_vpes)
#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
/* Calculate a pointer to this CPU's struct mips_static_suspend_state */
.macro psstate dest
.set push
.set noat
lw $1, TI_CPU(gp)
sll $1, $1, LONGLOG
la \dest, __per_cpu_offset
addu $1, $1, \dest
lw $1, 0($1)
la \dest, cps_cpu_state
addu \dest, \dest, $1
.set pop
.endm
LEAF(mips_cps_pm_save)
/* Save CPU state */
SUSPEND_SAVE_REGS
psstate t1
SUSPEND_SAVE_STATIC
jr v0
nop
END(mips_cps_pm_save)
LEAF(mips_cps_pm_restore)
/* Restore CPU state */
psstate t1
RESUME_RESTORE_STATIC
RESUME_RESTORE_REGS_RETURN
END(mips_cps_pm_restore)
#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
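
Two details of the assembly above translate compactly into C. The has_mt macro walks the Config register chain -- each bgez tests bit 31, the M bit saying "the next Config register exists" -- before finally testing Config3.MT; and mips_cps_boot_vpes derives a VPE ID by masking EBase.CPUNum with a power-of-two mask built via clz. A hedged C rendering of both, assuming the usual <asm/mipsregs.h> accessors (the kernel itself uses the assembly above, not these helpers):

static int sketch_has_mt(void)
{
	if (!(read_c0_config()  & MIPS_CONF_M))	/* Config1 present? */
		return 0;
	if (!(read_c0_config1() & MIPS_CONF_M))	/* Config2 present? */
		return 0;
	if (!(read_c0_config2() & MIPS_CONF_M))	/* Config3 present? */
		return 0;
	return !!(read_c0_config3() & MIPS_CONF3_MT);
}

static unsigned sketch_vpe_id(unsigned nvpes)	/* nvpes >= 1 */
{
	/* Matches the clz sequence: the mask covers the VPE-number bits */
	unsigned mask = (1u << (31 - __builtin_clz(nvpes))) - 1;

	return read_c0_ebase() & mask;	/* EBase is CP0 $15, select 1 */
}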

View file

@@ -62,7 +62,7 @@ static inline void check_errata(void)
case CPU_34K:
/*
* Erratum "RPS May Cause Incorrect Instruction Execution"
* This code only handles VPE0, any SMP/SMTC/RTOS code
* This code only handles VPE0, any SMP/RTOS code
* making use of VPE1 will be responsible for that VPE.
*/
if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
@@ -423,7 +423,7 @@ static void decode_configs(struct cpuinfo_mips *c)
#ifndef CONFIG_MIPS_CPS
if (cpu_has_mips_r2) {
c->core = read_c0_ebase() & 0x3ff;
c->core = get_ebase_cpunum();
if (cpu_has_mipsmt)
c->core >>= fls(core_nvpes()) - 1;
}
@@ -684,21 +684,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_RM9000:
c->cputype = CPU_RM9000;
__cpu_name[cpu] = "RM9000";
set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
/*
* Bit 29 in the info register of the RM9000
* indicates if the TLB has 48 or 64 entries.
*
* 29 1 => 64 entry JTLB
* 0 => 48 entry JTLB
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_R8000:
c->cputype = CPU_R8000;
__cpu_name[cpu] = "RM8000";
@@ -1041,6 +1026,7 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
decode_configs(c);
/* JZRISC does not implement the CP0 counter. */
c->options &= ~MIPS_CPU_COUNTER;
BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter);
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_JZRISC:
c->cputype = CPU_JZRISC;
@@ -1074,6 +1060,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_NETLOGIC_XLP2XX:
case PRID_IMP_NETLOGIC_XLP9XX:
case PRID_IMP_NETLOGIC_XLP5XX:
c->cputype = CPU_XLP;
__cpu_name[cpu] = "Broadcom XLPII";
break;
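
The decode_configs() hunk above replaces an open-coded read_c0_ebase() & 0x3ff with get_ebase_cpunum(), the helper introduced by this series' "MIPS: Add function get_ebase_cpunum" commit. Presumably it has this shape (a sketch mirroring the expression it replaces; CPUNum occupies the low ten bits of CP0 EBase):

static inline unsigned int get_ebase_cpunum(void)
{
	return read_c0_ebase() & 0x3ff;
}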

View file

@@ -16,9 +16,6 @@
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif
#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
@@ -89,41 +86,6 @@ FEXPORT(syscall_exit)
bnez t0, syscall_exit_work
restore_all: # restore full frame
#ifdef CONFIG_MIPS_MT_SMTC
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
mfc0 v0, CP0_TCSTATUS
ori v1, v0, TCSTATUS_IXMT
mtc0 v1, CP0_TCSTATUS
andi v0, TCSTATUS_IXMT
_ehb
mfc0 t0, CP0_TCCONTEXT
DMT 9 # dmt t1
jal mips_ihb
mfc0 t2, CP0_STATUS
andi t3, t0, 0xff00
or t2, t2, t3
mtc0 t2, CP0_STATUS
_ehb
andi t1, t1, VPECONTROL_TE
beqz t1, 1f
EMT
1:
mfc0 v1, CP0_TCSTATUS
/* We set IXMT above, XOR should clear it here */
xori v1, v1, TCSTATUS_IXMT
or v1, v0, v1
mtc0 v1, CP0_TCSTATUS
_ehb
xor t0, t0, t3
mtc0 t0, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
/* Detect and execute deferred IPI "interrupts" */
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
jal deferred_smtc_ipi
LONG_S s0, TI_REGS($28)
#endif /* CONFIG_MIPS_MT_SMTC */
.set noat
RESTORE_TEMP
RESTORE_AT

View file

@@ -21,20 +21,6 @@
#include <asm/war.h>
#include <asm/thread_info.h>
#ifdef CONFIG_MIPS_MT_SMTC
#define PANIC_PIC(msg) \
.set push; \
.set nomicromips; \
.set reorder; \
PTR_LA a0,8f; \
.set noat; \
PTR_LA AT, panic; \
jr AT; \
9: b 9b; \
.set pop; \
TEXT(msg)
#endif
__INIT
/*
@@ -251,15 +237,6 @@ NESTED(except_vec_vi, 0, sp)
SAVE_AT
.set push
.set noreorder
#ifdef CONFIG_MIPS_MT_SMTC
/*
* To keep from blindly blocking *all* interrupts
* during service by SMTC kernel, we also want to
* pass the IM value to be cleared.
*/
FEXPORT(except_vec_vi_mori)
ori a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
PTR_LA v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
@@ -277,37 +254,10 @@ EXPORT(except_vec_vi_end)
NESTED(except_vec_vi_handler, 0, sp)
SAVE_TEMP
SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC has an interesting problem that interrupts are level-triggered,
* and the CLI macro will clear EXL, potentially causing a duplicate
* interrupt service invocation. So we need to clear the associated
* IM bit of Status prior to doing CLI, and restore it after the
* service routine has been invoked - we must assume that the
* service routine will have cleared the state, and any active
* level represents a new or otherwise unserviced event...
*/
mfc0 t1, CP0_STATUS
and t0, a0, t1
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
mfc0 t2, CP0_TCCONTEXT
or t2, t0, t2
mtc0 t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
xor t1, t1, t0
mtc0 t1, CP0_STATUS
_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
CLI
#ifdef CONFIG_TRACE_IRQFLAGS
move s0, v0
#ifdef CONFIG_MIPS_MT_SMTC
move s1, a0
#endif
TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
move a0, s1
#endif
move v0, s0
#endif
@@ -496,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
.align 5
LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
.set push
.set noat
.set noreorder
@@ -517,7 +464,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set pop
bltz k1, handle_ri /* slow path */
/* fall thru */
#endif
END(handle_ri_rdhwr_vivt)
LEAF(handle_ri_rdhwr)

View file

@@ -35,33 +35,12 @@
*/
.macro setup_c0_status set clr
.set push
#ifdef CONFIG_MIPS_MT_SMTC
/*
* For SMTC, we need to set privilege and disable interrupts only for
* the current TC, using the TCStatus register.
*/
mfc0 t0, CP0_TCSTATUS
/* Fortunately CU 0 is in the same place in both registers */
/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
li t1, ST0_CU0 | 0x08001c00
or t0, t1
/* Clear TKSU, leave IXMT */
xori t0, 0x00001800
mtc0 t0, CP0_TCSTATUS
_ehb
/* We need to leave the global IE bit set, but clear EXL...*/
mfc0 t0, CP0_STATUS
or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
xor t0, ST0_EXL | ST0_ERL | \clr
mtc0 t0, CP0_STATUS
#else
mfc0 t0, CP0_STATUS
or t0, ST0_CU0|\set|0x1f|\clr
xor t0, 0x1f|\clr
mtc0 t0, CP0_STATUS
.set noreorder
sll zero,3 # ehb
#endif
.set pop
.endm
@@ -115,24 +94,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0
0:
#ifdef CONFIG_MIPS_MT_SMTC
/*
* In SMTC kernel, "CLI" is thread-specific, in TCStatus.
* We still need to enable interrupts globally in Status,
* and clear EXL/ERL.
*
* TCContext is used to track interrupt levels under
* service in SMTC kernel. Clear for boot TC before
* allowing any interrupts.
*/
mtc0 zero, CP0_TCCONTEXT
mfc0 t0, CP0_STATUS
ori t0, t0, 0xff1f
xori t0, t0, 0x001e
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
@@ -164,25 +125,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
* function after setting up the stack and gp registers.
*/
NESTED(smp_bootstrap, 16, sp)
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Read-modify-writes of Status must be atomic, and this
* is one case where CLI is invoked without EXL being
* necessarily set. The CLI and setup_c0_status will
* in fact be redundant for all but the first TC of
* each VPE being booted.
*/
DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
smp_slave_setup
setup_c0_status_sec
#ifdef CONFIG_MIPS_MT_SMTC
andi t2, t2, VPECONTROL_TE
beqz t2, 2f
EMT # emt
2:
#endif /* CONFIG_MIPS_MT_SMTC */
j start_secondary
END(smp_bootstrap)
#endif /* CONFIG_SMP */

View file

@@ -42,9 +42,6 @@ static struct irq_chip i8259A_chip = {
.irq_disable = disable_8259A_irq,
.irq_unmask = enable_8259A_irq,
.irq_mask_ack = mask_and_ack_8259A,
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
.irq_set_affinity = plat_set_irq_affinity,
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
};
/*
@@ -180,7 +177,6 @@ handle_real_irq:
outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
}
smtc_im_ack_irq(irq);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return;

View file

@@ -224,29 +224,26 @@ void __init check_wait(void)
cpu_wait = r4k_wait;
*/
break;
case CPU_RM9000:
if ((c->processor_id & 0x00ff) >= 0x40)
cpu_wait = r4k_wait;
break;
default:
break;
}
}
static void smtc_idle_hook(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_idle_loop_hook(void);
smtc_idle_loop_hook();
#endif
}
void arch_cpu_idle(void)
{
smtc_idle_hook();
if (cpu_wait)
cpu_wait();
else
local_irq_enable();
}
#ifdef CONFIG_CPU_IDLE
int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
arch_cpu_idle();
return index;
}
#endif
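
mips_cpuidle_wait_enter gives cpuidle drivers a ready-made entry hook for the shallowest state, funnelling it back through arch_cpu_idle(). A hypothetical wiring (the latency and residency values are placeholders, not numbers from this series):

static struct cpuidle_state sketch_wait_state = {
	.enter			= mips_cpuidle_wait_enter,
	.exit_latency		= 1,	/* placeholder */
	.target_residency	= 1,	/* placeholder */
	.name			= "wait",
	.desc			= "MIPS wait",
};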

View file

@@ -54,6 +54,21 @@ void gic_write_compare(cycle_t cnt)
(int)(cnt & 0xffffffff));
}
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
unsigned long flags;
local_irq_save(flags);
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
(int)(cnt >> 32));
GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
(int)(cnt & 0xffffffff));
local_irq_restore(flags);
}
cycle_t gic_read_compare(void)
{
unsigned int hi, lo;

View file

@@ -53,13 +53,9 @@ static inline void unmask_msc_irq(struct irq_data *d)
*/
static void level_mask_and_ack_msc_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
mask_msc_irq(d);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
/* This actually needs to be a call into platform code */
smtc_im_ack_irq(irq);
}
/*
@@ -78,7 +74,6 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d)
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
}
smtc_im_ack_irq(irq);
}
/*

View file

@@ -73,7 +73,6 @@ void free_irqno(unsigned int irq)
*/
void ack_bad_irq(unsigned int irq)
{
smtc_im_ack_irq(irq);
printk("unexpected IRQ # %d\n", irq);
}
@@ -142,23 +141,7 @@ void __irq_entry do_IRQ(unsigned int irq)
{
irq_enter();
check_stack_overflow();
if (!smtc_handle_on_other_cpu(irq))
generic_handle_irq(irq);
irq_exit();
}
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
* To avoid inefficient and in some cases pathological re-checking of
* IRQ affinity, we have this variant that skips the affinity check.
*/
void __irq_entry do_IRQ_no_affinity(unsigned int irq)
{
irq_enter();
smtc_im_backstop(irq);
generic_handle_irq(irq);
irq_exit();
}
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

View file

@@ -9,12 +9,18 @@
*/
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
void __iomem *mips_cpc_base;
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
phys_t __weak mips_cpc_phys_base(void)
{
u32 cpc_base;
@@ -39,6 +45,10 @@ phys_t __weak mips_cpc_phys_base(void)
int mips_cpc_probe(void)
{
phys_t addr;
unsigned cpu;
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(cpc_core_lock, cpu));
addr = mips_cpc_phys_base();
if (!addr)
@@ -50,3 +60,21 @@ int mips_cpc_probe(void)
return 0;
}
void mips_cpc_lock_other(unsigned int core)
{
unsigned curr_core;
preempt_disable();
curr_core = current_cpu_data.core;
spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
}
void mips_cpc_unlock_other(void)
{
unsigned curr_core = current_cpu_data.core;
spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
preempt_enable();
}
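
mips_cpc_lock_other() points the CPC's core-other register block at the target core and pins it there, with interrupts off on the local core, until mips_cpc_unlock_other(); any access to a write_cpc_co_* register must sit inside such a bracket. A hedged usage sketch, assuming the CPC accessors from asm/mips-cpc.h (the power-up scenario is illustrative):

static void sketch_power_up_core(unsigned core)
{
	mips_cpc_lock_other(core);		/* aim core-other at 'core' */
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);	/* issue power-up command */
	mips_cpc_unlock_other();
}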

View file

@@ -1,5 +1,5 @@
/*
* General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
* General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/cpu.h>

View file

@@ -1,5 +1,5 @@
/*
* General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
* General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
@@ -57,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl)
int tc;
unsigned long haltval;
unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_soft_dump(void);
#endif /* CONFIG_MIPT_MT_SMTC */
local_irq_save(flags);
vpflags = dvpe();
@@ -116,9 +113,6 @@ void mips_mt_regdump(unsigned long mvpctl)
if (!haltval)
write_tc_c0_tchalt(0);
}
#ifdef CONFIG_MIPS_MT_SMTC
smtc_soft_dump();
#endif /* CONFIG_MIPT_MT_SMTC */
printk("===========================\n");
evpe(vpflags);
local_irq_restore(flags);
@@ -295,21 +289,11 @@ void mips_mt_set_cpuoptions(void)
void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_cflush_lockdown(void);
smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_cflush_release(void);
smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

View file

@@ -10,24 +10,12 @@
* Copyright (C) 2000 MIPS Technologies, Inc.
* written by Carsten Langgaard, carstenl@mips.com
*/
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/asmmacro.h>
/*
* Offset to the current process status flags, the first 32 bytes of the
* stack are not used.
*/
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
#define USE_ALTERNATE_RESUME_IMPL 1
.set push
.set arch=mips64r2
#include "r4k_switch.S"
.set pop
/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti, int usedfpu)
@@ -40,6 +28,61 @@
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
/*
* check if we need to save FPU registers
*/
PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU
and t2, t0, t1
beqz t2, 1f
nor t1, zero, t1
and t0, t0, t1
LONG_S t0, TI_FLAGS(t3)
/*
* clear saved user stack CU1 bit
*/
LONG_L t0, ST_OFF(t3)
li t1, ~ST0_CU1
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
.set push
.set arch=mips64r2
fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1
.set pop
1:
/* check if we need to save COP2 registers */
PTR_L t2, TASK_THREAD_INFO(a0)
LONG_L t0, ST_OFF(t2)
bbit0 t0, 30, 1f
/* Disable COP2 in the stored process state */
li t1, ST0_CU2
xor t0, t1
LONG_S t0, ST_OFF(t2)
/* Enable COP2 so we can save it */
mfc0 t0, CP0_STATUS
or t0, t1
mtc0 t0, CP0_STATUS
/* Save COP2 */
daddu a0, THREAD_CP2
jal octeon_cop2_save
dsubu a0, THREAD_CP2
/* Disable COP2 now that we are done */
mfc0 t0, CP0_STATUS
li t1, ST0_CU2
xor t0, t1
mtc0 t0, CP0_STATUS
1:
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
/* Check if we need to store CVMSEG state */
mfc0 t0, $11,7 /* CvmMemCtl */
@@ -85,12 +128,7 @@
move $28, a2
cpu_restore_nonscratch a1
#if (_THREAD_SIZE - 32) < 0x8000
PTR_ADDIU t0, $28, _THREAD_SIZE - 32
#else
PTR_LI t0, _THREAD_SIZE - 32
PTR_ADDU t0, $28
#endif
PTR_ADDU t0, $28, _THREAD_SIZE - 32
set_saved_sp t0, t1, t2
mfc0 t1, CP0_STATUS /* Do we really need this? */

arch/mips/kernel/pm-cps.c (new file, 716 lines)
View file

@@ -0,0 +1,716 @@
/*
* Copyright (C) 2014 Imagination Technologies
* Author: Paul Burton <paul.burton@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>
/*
* cps_nc_entry_fn - type of a generated non-coherent state entry function
* @online: the count of online coupled VPEs
* @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
*
* The code entering & exiting non-coherent states is generated at runtime
* using uasm, in order to ensure that the compiler cannot insert a stray
* memory access at an unfortunate time and to allow the generation of optimal
* core-specific code particularly for cache routines. If coupled_coherence
* is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
* returns the number of VPEs that were in the wait state at the point this
* VPE left it. Returns garbage if coupled_coherence is zero or this is not
* the entry function for CPS_PM_NC_WAIT.
*/
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
/*
* The entry point of the generated non-coherent idle state entry/exit
* functions. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
nc_asm_enter);
/* Bitmap indicating which states are supported by the system */
DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
/*
* Indicates the number of coupled VPEs ready to operate in a non-coherent
* state. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
/*
* Used to synchronize entry to deep idle states. Actually per-core rather
* than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32] __initdata;
static struct uasm_reloc relocs[32] __initdata;
/* CPU-dependent sync types */
static unsigned stype_intervention;
static unsigned stype_memory;
static unsigned stype_ordering;
enum mips_reg {
zero, at, v0, v1, a0, a1, a2, a3,
t0, t1, t2, t3, t4, t5, t6, t7,
s0, s1, s2, s3, s4, s5, s6, s7,
t8, t9, k0, k1, gp, sp, fp, ra,
};
bool cps_pm_support_state(enum cps_pm_state state)
{
return test_bit(state, state_support);
}
static void coupled_barrier(atomic_t *a, unsigned online)
{
/*
* This function is effectively the same as
* cpuidle_coupled_parallel_barrier, which can't be used here since
* there's no cpuidle device.
*/
if (!coupled_coherence)
return;
smp_mb__before_atomic_inc();
atomic_inc(a);
while (atomic_read(a) < online)
cpu_relax();
if (atomic_inc_return(a) == online * 2) {
atomic_set(a, 0);
return;
}
while (atomic_read(a) > online)
cpu_relax();
}
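/*
 * Editor's note -- worked trace of coupled_barrier() above with
 * online == 2 (illustrative). The counter climbs 0 -> online while the
 * VPEs gather, then online -> 2 * online as they leave; the last
 * leaver resets it to 0 so the barrier can be reused:
 *
 *   VPE0: atomic_inc          a = 1   spins while (a < 2)
 *   VPE1: atomic_inc          a = 2   both fall through
 *   VPE0: atomic_inc_return   a = 3   3 != 4, spins while (a > 2)
 *   VPE1: atomic_inc_return   a = 4   == online * 2, sets a = 0
 *   VPE0: reads a == 0, exits its (a > 2) spin
 */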
int cps_pm_enter_state(enum cps_pm_state state)
{
unsigned cpu = smp_processor_id();
unsigned core = current_cpu_data.core;
unsigned online, left;
cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
u32 *core_ready_count, *nc_core_ready_count;
void *nc_addr;
cps_nc_entry_fn entry;
struct core_boot_config *core_cfg;
struct vpe_boot_config *vpe_cfg;
/* Check that there is an entry function for this state */
entry = per_cpu(nc_asm_enter, core)[state];
if (!entry)
return -EINVAL;
/* Calculate which coupled CPUs (VPEs) are online */
#ifdef CONFIG_MIPS_MT
if (cpu_online(cpu)) {
cpumask_and(coupled_mask, cpu_online_mask,
&cpu_sibling_map[cpu]);
online = cpumask_weight(coupled_mask);
cpumask_clear_cpu(cpu, coupled_mask);
} else
#endif
{
cpumask_clear(coupled_mask);
online = 1;
}
/* Setup the VPE to run mips_cps_pm_restore when started again */
if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
core_cfg = &mips_cps_core_bootcfg[core];
vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id];
vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
vpe_cfg->gp = (unsigned long)current_thread_info();
vpe_cfg->sp = 0;
}
/* Indicate that this CPU might not be coherent */
cpumask_clear_cpu(cpu, &cpu_coherent_mask);
smp_mb__after_clear_bit();
/* Create a non-coherent mapping of the core ready_count */
core_ready_count = per_cpu(ready_count, core);
nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
(unsigned long)core_ready_count);
nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
nc_core_ready_count = nc_addr;
/* Ensure ready_count is zero-initialised before the assembly runs */
ACCESS_ONCE(*nc_core_ready_count) = 0;
coupled_barrier(&per_cpu(pm_barrier, core), online);
/* Run the generated entry code */
left = entry(online, nc_core_ready_count);
/* Remove the non-coherent mapping of ready_count */
kunmap_noncoherent();
/* Indicate that this CPU is definitely coherent */
cpumask_set_cpu(cpu, &cpu_coherent_mask);
/*
* If this VPE is the first to leave the non-coherent wait state then
* it needs to wake up any coupled VPEs still running their wait
* instruction so that they return to cpuidle, which can then complete
* coordination between the coupled VPEs & provide the governor with
* a chance to reflect on the length of time the VPEs were in the
* idle state.
*/
if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
arch_send_call_function_ipi_mask(coupled_mask);
return 0;
}
static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
const struct cache_desc *cache,
unsigned op, int lbl)
{
unsigned cache_size = cache->ways << cache->waybit;
unsigned i;
const unsigned unroll_lines = 32;
/* If the cache isn't present this function has it easy */
if (cache->flags & MIPS_CACHE_NOT_PRESENT)
return;
/* Load base address */
UASM_i_LA(pp, t0, (long)CKSEG0);
/* Calculate end address */
if (cache_size < 0x8000)
uasm_i_addiu(pp, t1, t0, cache_size);
else
UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
/* Start of cache op loop */
uasm_build_label(pl, *pp, lbl);
/* Generate the cache ops */
for (i = 0; i < unroll_lines; i++)
uasm_i_cache(pp, op, i * cache->linesz, t0);
/* Update the base address */
uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
/* Loop if we haven't reached the end address yet */
uasm_il_bne(pp, pr, t0, t1, lbl);
uasm_i_nop(pp);
}
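/*
 * Editor's note -- rough C shape of the loop cps_gen_cache_routine()
 * emits (illustrative; the generated loop is unrolled 32 lines at a
 * time). Index-type cache ops interpret the address as way/index bits
 * rather than as a real address, so sweeping a KSEG0 window of
 * cache_size bytes touches every line of the cache.
 */
static void sketch_index_invalidate_icache(unsigned cache_size,
					   unsigned linesz)
{
	unsigned long addr = CKSEG0;
	unsigned long end = addr + cache_size;

	for (; addr < end; addr += linesz)
		/* 0x00 is the Index_Invalidate_I cache op */
		asm volatile("cache 0x00, 0(%0)" : : "r" (addr));
}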
static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
const struct cpuinfo_mips *cpu_info,
int lbl)
{
unsigned i, fsb_size = 8;
unsigned num_loads = (fsb_size * 3) / 2;
unsigned line_stride = 2;
unsigned line_size = cpu_info->dcache.linesz;
unsigned perf_counter, perf_event;
unsigned revision = cpu_info->processor_id & PRID_REV_MASK;
/*
* Determine whether this CPU requires an FSB flush, and if so which
* performance counter/event reflect stalls due to a full FSB.
*/
switch (__get_cpu_type(cpu_info->cputype)) {
case CPU_INTERAPTIV:
perf_counter = 1;
perf_event = 51;
break;
case CPU_PROAPTIV:
/* Newer proAptiv cores don't require this workaround */
if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
return 0;
/* On older ones it's unavailable */
return -1;
/* CPUs which do not require the workaround */
case CPU_P5600:
return 0;
default:
WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
return -1;
}
/*
* Ensure that the fill/store buffer (FSB) is not holding the results
* of a prefetch, since if it is then the CPC sequencer may become
* stuck in the D3 (ClrBus) state whilst entering a low power state.
*/
/* Preserve perf counter setup */
uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
/* Setup perf counter to count FSB full pipeline stalls */
uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_ehb(pp);
uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_ehb(pp);
/* Base address for loads */
UASM_i_LA(pp, t0, (long)CKSEG0);
/* Start of clear loop */
uasm_build_label(pl, *pp, lbl);
/* Perform some loads to fill the FSB */
for (i = 0; i < num_loads; i++)
uasm_i_lw(pp, zero, i * line_size * line_stride, t0);
/*
* Invalidate the new D-cache entries so that the cache will need
* refilling (via the FSB) if the loop is executed again.
*/
for (i = 0; i < num_loads; i++) {
uasm_i_cache(pp, Hit_Invalidate_D,
i * line_size * line_stride, t0);
uasm_i_cache(pp, Hit_Writeback_Inv_SD,
i * line_size * line_stride, t0);
}
/* Completion barrier */
uasm_i_sync(pp, stype_memory);
uasm_i_ehb(pp);
/* Check whether the pipeline stalled due to the FSB being full */
uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */
/* Loop if it didn't */
uasm_il_beqz(pp, pr, t1, lbl);
uasm_i_nop(pp);
/* Restore perf counter 1. The count may well now be wrong... */
uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_ehb(pp);
uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_ehb(pp);
return 0;
}
static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
unsigned r_addr, int lbl)
{
uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
uasm_build_label(pl, *pp, lbl);
uasm_i_ll(pp, t1, 0, r_addr);
uasm_i_or(pp, t1, t1, t0);
uasm_i_sc(pp, t1, 0, r_addr);
uasm_il_beqz(pp, pr, t1, lbl);
uasm_i_nop(pp);
}
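/*
 * Editor's note -- C equivalent of the ll/sc sequence generated by
 * cps_gen_set_top_bit() above (illustrative). It atomically sets bit
 * 31 of ready_count, which the waiters parked at lbl_poll_cont detect
 * with a signed bltz test.
 */
static void sketch_set_top_bit(u32 *addr)
{
	u32 old;

	do {
		old = *(volatile u32 *)addr;	/* stands in for ll */
	} while (cmpxchg(addr, old, old | 0x80000000u) != old);
}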
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
u32 *buf, *p;
const unsigned r_online = a0;
const unsigned r_nc_count = a1;
const unsigned r_pcohctl = t7;
const unsigned max_instrs = 256;
unsigned cpc_cmd;
int err;
enum {
lbl_incready = 1,
lbl_poll_cont,
lbl_secondary_hang,
lbl_disable_coherence,
lbl_flush_fsb,
lbl_invicache,
lbl_flushdcache,
lbl_hang,
lbl_set_cont,
lbl_secondary_cont,
lbl_decready,
};
/* Allocate a buffer to hold the generated code */
p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
if (!buf)
return NULL;
/* Clear labels & relocs ready for (re)use */
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/*
* Save CPU state. Note the non-standard calling convention
* with the return address placed in v0 to avoid clobbering
* the ra register before it is saved.
*/
UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
uasm_i_jalr(&p, v0, t0);
uasm_i_nop(&p);
}
/*
* Load addresses of required CM & CPC registers. This is done early
* because they're needed in both the enable & disable coherence steps
* but in the coupled case the enable step will only run on one VPE.
*/
UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());
if (coupled_coherence) {
/* Increment ready_count */
uasm_i_sync(&p, stype_ordering);
uasm_build_label(&l, p, lbl_incready);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, 1);
uasm_i_sc(&p, t2, 0, r_nc_count);
uasm_il_beqz(&p, &r, t2, lbl_incready);
uasm_i_addiu(&p, t1, t1, 1);
/* Ordering barrier */
uasm_i_sync(&p, stype_ordering);
/*
* If this is the last VPE to become ready for non-coherence
* then it should branch below.
*/
uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
uasm_i_nop(&p);
if (state < CPS_PM_POWER_GATED) {
/*
* Otherwise this is not the last VPE to become ready
* for non-coherence. It needs to wait until coherence
* has been disabled before proceeding, which it will do
* by polling for the top bit of ready_count being set.
*/
uasm_i_addiu(&p, t1, zero, -1);
uasm_build_label(&l, p, lbl_poll_cont);
uasm_i_lw(&p, t0, 0, r_nc_count);
uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
uasm_i_ehb(&p);
uasm_i_yield(&p, zero, t1);
uasm_il_b(&p, &r, lbl_poll_cont);
uasm_i_nop(&p);
} else {
/*
* The core will lose power & this VPE will not continue
* so it can simply halt here.
*/
uasm_i_addiu(&p, t0, zero, TCHALT_H);
uasm_i_mtc0(&p, t0, 2, 4);
uasm_build_label(&l, p, lbl_secondary_hang);
uasm_il_b(&p, &r, lbl_secondary_hang);
uasm_i_nop(&p);
}
}
/*
* This is the point of no return - this VPE will now proceed to
* disable coherence. At this point we *must* be sure that no other
* VPE within the core will interfere with the L1 dcache.
*/
uasm_build_label(&l, p, lbl_disable_coherence);
/* Invalidate the L1 icache */
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
Index_Invalidate_I, lbl_invicache);
/* Writeback & invalidate the L1 dcache */
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
Index_Writeback_Inv_D, lbl_flushdcache);
/* Completion barrier */
uasm_i_sync(&p, stype_memory);
uasm_i_ehb(&p);
/*
* Disable all but self interventions. The load from COHCTL is defined
* by the interAptiv & proAptiv SUMs as ensuring that the operation
* resulting from the preceding store is complete.
*/
uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
/* Sync to ensure previous interventions are complete */
uasm_i_sync(&p, stype_intervention);
uasm_i_ehb(&p);
/* Disable coherence */
uasm_i_sw(&p, zero, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
if (state >= CPS_PM_CLOCK_GATED) {
err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
lbl_flush_fsb);
if (err)
goto out_err;
/* Determine the CPC command to issue */
switch (state) {
case CPS_PM_CLOCK_GATED:
cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
break;
case CPS_PM_POWER_GATED:
cpc_cmd = CPC_Cx_CMD_PWRDOWN;
break;
default:
BUG();
goto out_err;
}
/* Issue the CPC command */
UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
uasm_i_addiu(&p, t1, zero, cpc_cmd);
uasm_i_sw(&p, t1, 0, t0);
if (state == CPS_PM_POWER_GATED) {
/* If anything goes wrong just hang */
uasm_build_label(&l, p, lbl_hang);
uasm_il_b(&p, &r, lbl_hang);
uasm_i_nop(&p);
/*
* There's no point generating more code, the core is
* powered down & if powered back up will run from the
* reset vector not from here.
*/
goto gen_done;
}
/* Completion barrier */
uasm_i_sync(&p, stype_memory);
uasm_i_ehb(&p);
}
if (state == CPS_PM_NC_WAIT) {
/*
* At this point it is safe for all VPEs to proceed with
* execution. This VPE will set the top bit of ready_count
* to indicate to the other VPEs that they may continue.
*/
if (coupled_coherence)
cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
lbl_set_cont);
/*
* VPEs which did not disable coherence will continue
* executing, after coherence has been disabled, from this
* point.
*/
uasm_build_label(&l, p, lbl_secondary_cont);
/* Now perform our wait */
uasm_i_wait(&p, 0);
}
/*
* Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
* will run this. The first will actually re-enable coherence & the
* rest will just be performing a rather unusual nop.
*/
uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
/* Completion barrier */
uasm_i_sync(&p, stype_memory);
uasm_i_ehb(&p);
if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
/* Decrement ready_count */
uasm_build_label(&l, p, lbl_decready);
uasm_i_sync(&p, stype_ordering);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, -1);
uasm_i_sc(&p, t2, 0, r_nc_count);
uasm_il_beqz(&p, &r, t2, lbl_decready);
uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
/* Ordering barrier */
uasm_i_sync(&p, stype_ordering);
}
if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
/*
* At this point it is safe for all VPEs to proceed with
* execution. This VPE will set the top bit of ready_count
* to indicate to the other VPEs that they may continue.
*/
cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
/*
* This core will be reliant upon another core sending a
* power-up command to the CPC in order to resume operation.
* Thus an arbitrary VPE can't trigger the core leaving the
* idle state and the one that disables coherence might as well
* be the one to re-enable it. The rest will continue from here
* after that has been done.
*/
uasm_build_label(&l, p, lbl_secondary_cont);
/* Ordering barrier */
uasm_i_sync(&p, stype_ordering);
}
/* The core is coherent, time to return to C code */
uasm_i_jr(&p, ra);
uasm_i_nop(&p);
gen_done:
/* Ensure the code didn't exceed the resources allocated for it */
BUG_ON((p - buf) > max_instrs);
BUG_ON((l - labels) > ARRAY_SIZE(labels));
BUG_ON((r - relocs) > ARRAY_SIZE(relocs));
/* Patch branch offsets */
uasm_resolve_relocs(relocs, labels);
/* Flush the icache */
local_flush_icache_range((unsigned long)buf, (unsigned long)p);
return buf;
out_err:
kfree(buf);
return NULL;
}
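The LL/SC sequences generated above are easier to follow as an equivalent C model. Below is a rough userspace C11 sketch of the rendezvous the generated instructions implement (vpe_rendezvous and TOP_BIT are illustrative names, not the kernel's code; the real sequence disables coherence between arriving last and publishing the top bit):

#include <stdatomic.h>
#include <stdint.h>

#define TOP_BIT (1u << 31)

static _Atomic uint32_t ready_count;

/* Called by each VPE; online_vpes is the number of coupled VPEs. */
static void vpe_rendezvous(uint32_t online_vpes)
{
	/* atomic increment, as the ll/sc loop at lbl_incready does */
	if (atomic_fetch_add(&ready_count, 1) + 1 == online_vpes) {
		/* last VPE in: disable coherence here, then publish */
		atomic_fetch_or(&ready_count, TOP_BIT);
	} else {
		/* not last: wait until coherence has been disabled */
		while (!(atomic_load(&ready_count) & TOP_BIT))
			;	/* the generated code yields the VPE here */
	}
}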
static int __init cps_gen_core_entries(unsigned cpu)
{
enum cps_pm_state state;
unsigned core = cpu_data[cpu].core;
unsigned dlinesz = cpu_data[cpu].dcache.linesz;
void *entry_fn, *core_rc;
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
if (per_cpu(nc_asm_enter, core)[state])
continue;
if (!test_bit(state, state_support))
continue;
entry_fn = cps_gen_entry_code(cpu, state);
if (!entry_fn) {
pr_err("Failed to generate core %u state %u entry\n",
core, state);
clear_bit(state, state_support);
}
per_cpu(nc_asm_enter, core)[state] = entry_fn;
}
if (!per_cpu(ready_count, core)) {
core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
if (!core_rc) {
pr_err("Failed to allocate core %u ready_count\n", core);
return -ENOMEM;
}
per_cpu(ready_count_alloc, core) = core_rc;
/* Ensure ready_count is aligned to a cacheline boundary */
core_rc += dlinesz - 1;
core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
per_cpu(ready_count, core) = core_rc;
}
return 0;
}
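The ready_count alignment above is the common over-allocate-and-round trick. A minimal standalone sketch, assuming linesz is a power of two (alloc_cacheline_aligned is an illustrative name; note the raw pointer must be kept for freeing, as ready_count_alloc is above):

#include <stdint.h>
#include <stdlib.h>

static void *alloc_cacheline_aligned(size_t size, size_t linesz, void **raw)
{
	/* over-allocate by one line so a boundary always falls inside */
	char *p = malloc(size + linesz);	/* kmalloc in the kernel */
	if (!p)
		return NULL;
	*raw = p;	/* caller frees this, not the aligned pointer */
	return (void *)(((uintptr_t)p + linesz - 1) & ~(uintptr_t)(linesz - 1));
}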
static int __init cps_pm_init(void)
{
unsigned cpu;
int err;
/* Detect appropriate sync types for the system */
switch (current_cpu_data.cputype) {
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
case CPU_M5150:
case CPU_P5600:
stype_intervention = 0x2;
stype_memory = 0x3;
stype_ordering = 0x10;
break;
default:
pr_warn("Power management is using heavyweight sync 0\n");
}
/* A CM is required for all non-coherent states */
if (!mips_cm_present()) {
pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
goto out;
}
/*
* If interrupts were enabled whilst running a wait instruction on a
* non-coherent core then the VPE may end up processing interrupts
* whilst non-coherent. That would be bad.
*/
if (cpu_wait == r4k_wait_irqoff)
set_bit(CPS_PM_NC_WAIT, state_support);
else
pr_warn("pm-cps: non-coherent wait unavailable\n");
/* Detect whether a CPC is present */
if (mips_cpc_present()) {
/* Detect whether clock gating is implemented */
if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
set_bit(CPS_PM_CLOCK_GATED, state_support);
else
pr_warn("pm-cps: CPC does not support clock gating\n");
/* Power gating is available with CPS SMP & any CPC */
if (mips_cps_smp_in_use())
set_bit(CPS_PM_POWER_GATED, state_support);
else
pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
} else {
pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
}
for_each_present_cpu(cpu) {
err = cps_gen_core_entries(cpu);
if (err)
return err;
}
out:
return 0;
}
arch_initcall(cps_pm_init);
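A hedged usage sketch of the states probed above, from a caller's point of view (kernel context; cps_pm_support_state() and cps_pm_enter_state() are the interfaces this file exports via asm/pm-cps.h, cpu_wait comes from asm/idle.h, and example_cpu_idle is an invented name):

static void example_cpu_idle(void)
{
	/* prefer the non-coherent wait if cps_pm_init() enabled it */
	if (cps_pm_support_state(CPS_PM_NC_WAIT))
		cps_pm_enter_state(CPS_PM_NC_WAIT);
	else if (cpu_wait)
		cpu_wait();	/* fall back to the plain wait handler */
}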

arch/mips/kernel/pm.c (new file, 99 lines added)
View file

@@ -0,0 +1,99 @@
/*
* Copyright (C) 2014 Imagination Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* CPU PM notifiers for saving/restoring general CPU state.
*/
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/pm.h>
#include <asm/watch.h>
/* Used by PM helper macros in asm/pm.h */
struct mips_static_suspend_state mips_static_suspend_state;
/**
* mips_cpu_save() - Save general CPU state.
* Ensures that general CPU context is saved, notably FPU and DSP.
*/
static int mips_cpu_save(void)
{
/* Save FPU state */
lose_fpu(1);
/* Save DSP state */
save_dsp(current);
return 0;
}
/**
* mips_cpu_restore() - Restore general CPU state.
* Restores important CPU context.
*/
static void mips_cpu_restore(void)
{
unsigned int cpu = smp_processor_id();
/* Restore ASID */
if (current->mm)
write_c0_entryhi(cpu_asid(cpu, current->mm));
/* Restore DSP state */
restore_dsp(current);
/* Restore UserLocal */
if (cpu_has_userlocal)
write_c0_userlocal(current_thread_info()->tp_value);
/* Restore watch registers */
__restore_watch();
}
/**
* mips_pm_notifier() - Notifier for preserving general CPU context.
* @self: Notifier block.
* @cmd: CPU PM event.
* @v: Private data (unused).
*
* This is called when a CPU power management event occurs, and is used to
* ensure that important CPU context is preserved across a CPU power down.
*/
static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
int ret;
switch (cmd) {
case CPU_PM_ENTER:
ret = mips_cpu_save();
if (ret)
return NOTIFY_STOP;
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
mips_cpu_restore();
break;
}
return NOTIFY_OK;
}
static struct notifier_block mips_pm_notifier_block = {
.notifier_call = mips_pm_notifier,
};
static int __init mips_pm_init(void)
{
return cpu_pm_register_notifier(&mips_pm_notifier_block);
}
arch_initcall(mips_pm_init);
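For context, a sketch of how the notifier above gets driven (kernel context; cpu_pm_enter() and cpu_pm_exit() are the linux/cpu_pm.h entry points, while platform_enter_lowpower() is a made-up placeholder for a state-losing suspend):

static int example_idle_enter(void)
{
	int err;

	err = cpu_pm_enter();	/* fires CPU_PM_ENTER -> mips_cpu_save() */
	if (err)
		return err;

	platform_enter_lowpower();	/* CPU context may be lost here */

	cpu_pm_exit();		/* fires CPU_PM_EXIT -> mips_cpu_restore() */
	return 0;
}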

View file

@@ -140,13 +140,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
*/
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC restores TCStatus after Status, and the CU bits
* are aliased there.
*/
childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
clear_tsk_thread_flag(p, TIF_USEDFPU);
#ifdef CONFIG_MIPS_MT_FPAFF

View file

@@ -28,6 +28,7 @@
*/
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
#ifndef USE_ALTERNATE_RESUME_IMPL
/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti, s32 fp_save)
@@ -87,18 +88,6 @@
PTR_ADDU t0, $28, _THREAD_SIZE - 32
set_saved_sp t0, t1, t2
#ifdef CONFIG_MIPS_MT_SMTC
/* Read-modify-writes of Status must be atomic on a VPE */
mfc0 t2, CP0_TCSTATUS
ori t1, t2, TCSTATUS_IXMT
mtc0 t1, CP0_TCSTATUS
andi t2, t2, TCSTATUS_IXMT
_ehb
DMT 8 # dmt t0
move t1,ra
jal mips_ihb
move ra,t1
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t1, CP0_STATUS /* Do we really need this? */
li a3, 0xff01
and t1, a3
@@ -107,22 +96,12 @@
and a2, a3
or a2, t1
mtc0 a2, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
_ehb
andi t0, t0, VPECONTROL_TE
beqz t0, 1f
emt
1:
mfc0 t1, CP0_TCSTATUS
xori t1, t1, TCSTATUS_IXMT
or t1, t1, t2
mtc0 t1, CP0_TCSTATUS
_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
move v0, a0
jr ra
END(resume)
#endif /* USE_ALTERNATE_RESUME_IMPL */
/*
* Save a thread's fp context.
*/
@@ -176,19 +155,10 @@ LEAF(_restore_msa)
#define FPU_DEFAULT 0x00000000
LEAF(_init_fpu)
#ifdef CONFIG_MIPS_MT_SMTC
/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
mfc0 t0, CP0_TCSTATUS
/* Bit position is the same for Status, TCStatus */
li t1, ST0_CU1
or t0, t1
mtc0 t0, CP0_TCSTATUS
#else /* Normal MIPS CU1 enable */
mfc0 t0, CP0_STATUS
li t1, ST0_CU1
or t0, t1
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
enable_fpu_hazard
li t1, FPU_DEFAULT

View file

@@ -36,7 +36,6 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
unsigned long flags;
int i;
/* Ought not to be strictly necessary for SMTC builds */
local_irq_save(flags);
vpeflags = dvpe();
set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);

View file

@@ -280,13 +280,6 @@ static void bmips_smp_finish(void)
irq_enable_hazard();
}
/*
* Runs on CPU0 after all CPUs have been booted
*/
static void bmips_cpus_done(void)
{
}
/*
* BMIPS5000 raceless IPIs
*
@@ -434,7 +427,6 @@ struct plat_smp_ops bmips43xx_smp_ops = {
.boot_secondary = bmips_boot_secondary,
.smp_finish = bmips_smp_finish,
.init_secondary = bmips_init_secondary,
.cpus_done = bmips_cpus_done,
.send_ipi_single = bmips43xx_send_ipi_single,
.send_ipi_mask = bmips43xx_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
@@ -449,7 +441,6 @@ struct plat_smp_ops bmips5000_smp_ops = {
.boot_secondary = bmips_boot_secondary,
.smp_finish = bmips_smp_finish,
.init_secondary = bmips_init_secondary,
.cpus_done = bmips_cpus_done,
.send_ipi_single = bmips5000_send_ipi_single,
.send_ipi_mask = bmips5000_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU

View file

@@ -49,14 +49,11 @@ static void cmp_init_secondary(void)
/* Enable per-cpu interrupts: platform specific */
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
#ifdef CONFIG_MIPS_MT_SMP
if (cpu_has_mipsmt)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
TCBIND_CURVPE;
#endif
#ifdef CONFIG_MIPS_MT_SMTC
c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
#endif
}
static void cmp_smp_finish(void)
@@ -75,11 +72,6 @@ static void cmp_smp_finish(void)
local_irq_enable();
}
static void cmp_cpus_done(void)
{
pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it running
* smp_bootstrap is the place to resume from
@@ -135,10 +127,6 @@ void __init cmp_smp_setup(void)
unsigned int mvpconf0 = read_c0_mvpconf0();
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
#elif defined(CONFIG_MIPS_MT_SMTC)
unsigned int mvpconf0 = read_c0_mvpconf0();
nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
#endif
smp_num_siblings = nvpe;
}
@@ -165,7 +153,6 @@ struct plat_smp_ops cmp_smp_ops = {
.send_ipi_mask = gic_send_ipi_mask,
.init_secondary = cmp_init_secondary,
.smp_finish = cmp_smp_finish,
.cpus_done = cmp_cpus_done,
.boot_secondary = cmp_boot_secondary,
.smp_setup = cmp_smp_setup,
.prepare_cpus = cmp_prepare_cpus,

View file

@@ -20,104 +20,43 @@
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>
static DECLARE_BITMAP(core_power, NR_CPUS);
struct boot_config mips_cps_bootcfg;
struct core_boot_config *mips_cps_core_bootcfg;
static void init_core(void)
static unsigned core_vpe_count(unsigned core)
{
unsigned int nvpes, t;
u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status;
unsigned cfg;
if (!cpu_has_mipsmt)
return;
if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
return 1;
/* Enter VPE configuration state */
dvpe();
set_c0_mvpcontrol(MVPCONTROL_VPC);
/* Retrieve the count of VPEs in this core */
mvpconf0 = read_c0_mvpconf0();
nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
smp_num_siblings = nvpes;
for (t = 1; t < nvpes; t++) {
/* Use a 1:1 mapping of TC index to VPE index */
settc(t);
/* Bind 1 TC to this VPE */
tcbind = read_tc_c0_tcbind();
tcbind &= ~TCBIND_CURVPE;
tcbind |= t << TCBIND_CURVPE_SHIFT;
write_tc_c0_tcbind(tcbind);
/* Set exclusive TC, non-active, master */
vpeconf0 = read_vpe_c0_vpeconf0();
vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA);
vpeconf0 |= t << VPECONF0_XTC_SHIFT;
vpeconf0 |= VPECONF0_MVP;
write_vpe_c0_vpeconf0(vpeconf0);
/* Declare TC non-active, non-allocatable & interrupt exempt */
tcstatus = read_tc_c0_tcstatus();
tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
tcstatus |= TCSTATUS_IXMT;
write_tc_c0_tcstatus(tcstatus);
/* Halt the TC */
write_tc_c0_tchalt(TCHALT_H);
/* Allow only 1 TC to execute */
vpecontrol = read_vpe_c0_vpecontrol();
vpecontrol &= ~VPECONTROL_TE;
write_vpe_c0_vpecontrol(vpecontrol);
/* Copy (most of) Status from VPE 0 */
status = read_c0_status();
status &= ~(ST0_IM | ST0_IE | ST0_KSU);
status |= ST0_CU0;
write_vpe_c0_status(status);
/* Copy Config from VPE 0 */
write_vpe_c0_config(read_c0_config());
write_vpe_c0_config7(read_c0_config7());
/* Ensure no software interrupts are pending */
write_vpe_c0_cause(0);
/* Sync Count */
write_vpe_c0_count(read_c0_count());
}
/* Leave VPE configuration state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}
static void __init cps_smp_setup(void)
{
unsigned int ncores, nvpes, core_vpes;
int c, v;
u32 core_cfg, *entry_code;
/* Detect & record VPE topology */
ncores = mips_cm_numcores();
pr_info("VPE topology ");
for (c = nvpes = 0; c < ncores; c++) {
if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) {
write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF);
core_cfg = read_gcr_co_config();
core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >>
CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
} else {
core_vpes = 1;
}
core_vpes = core_vpe_count(c);
pr_cont("%c%u", c ? ',' : '{', core_vpes);
/* Use the number of VPEs in core 0 for smp_num_siblings */
if (!c)
smp_num_siblings = core_vpes;
for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
cpu_data[nvpes + v].core = c;
#ifdef CONFIG_MIPS_MT_SMP
@@ -137,19 +76,14 @@ static void __init cps_smp_setup(void)
__cpu_logical_map[v] = v;
}
/* Set a coherent default CCA (CWB) */
change_c0_config(CONF_CM_CMASK, 0x5);
/* Core 0 is powered up (we're running on it) */
bitmap_set(core_power, 0, 1);
/* Disable MT - we only want to run 1 TC per VPE */
if (cpu_has_mipsmt)
dmt();
/* Initialise core 0 */
init_core();
/* Patch the start of mips_cps_core_entry to provide the CM base */
entry_code = (u32 *)&mips_cps_core_entry;
UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
mips_cps_core_init();
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
@@ -157,15 +91,99 @@ static void __init cps_smp_setup(void)
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
unsigned ncores, core_vpes, c, cca;
bool cca_unsuitable;
u32 *entry_code;
mips_mt_set_cpuoptions();
/* Detect whether the CCA is unsuited to multi-core SMP */
cca = read_c0_config() & CONF_CM_CMASK;
switch (cca) {
case 0x4: /* CWBE */
case 0x5: /* CWB */
/* The CCA is coherent, multi-core is fine */
cca_unsuitable = false;
break;
default:
/* CCA is not coherent, multi-core is not usable */
cca_unsuitable = true;
}
/* Warn the user if the CCA prevents multi-core */
ncores = mips_cm_numcores();
if (cca_unsuitable && ncores > 1) {
pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
cca);
for_each_present_cpu(c) {
if (cpu_data[c].core)
set_cpu_present(c, false);
}
}
/*
* Patch the start of mips_cps_core_entry to provide:
*
* v0 = CM base address
* s0 = kseg0 CCA
*/
entry_code = (u32 *)&mips_cps_core_entry;
UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
uasm_i_addiu(&entry_code, 16, 0, cca);
dma_cache_wback_inv((unsigned long)&mips_cps_core_entry,
(void *)entry_code - (void *)&mips_cps_core_entry);
/* Allocate core boot configuration structs */
mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
GFP_KERNEL);
if (!mips_cps_core_bootcfg) {
pr_err("Failed to allocate boot config for %u cores\n", ncores);
goto err_out;
}
/* Allocate VPE boot configuration structs */
for (c = 0; c < ncores; c++) {
core_vpes = core_vpe_count(c);
mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
sizeof(*mips_cps_core_bootcfg[c].vpe_config),
GFP_KERNEL);
if (!mips_cps_core_bootcfg[c].vpe_config) {
pr_err("Failed to allocate %u VPE boot configs\n",
core_vpes);
goto err_out;
}
}
/* Mark this CPU as booted */
atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
1 << cpu_vpe_id(&current_cpu_data));
return;
err_out:
/* Clean up allocations */
if (mips_cps_core_bootcfg) {
for (c = 0; c < ncores; c++)
kfree(mips_cps_core_bootcfg[c].vpe_config);
kfree(mips_cps_core_bootcfg);
mips_cps_core_bootcfg = NULL;
}
/* Effectively disable SMP by declaring CPUs not present */
for_each_possible_cpu(c) {
if (c == 0)
continue;
set_cpu_present(c, false);
}
}
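The vpe_mask word initialised above, and updated in cps_boot_secondary() and cps_cpu_disable() below, is a per-core atomic bitmask of booted VPEs. A userspace C11 model of the same idea, with illustrative names:

#include <stdatomic.h>

static atomic_uint vpe_mask;	/* one bit per VPE of the core */

static void vpe_mark_booted(unsigned int vpe)
{
	atomic_fetch_or(&vpe_mask, 1u << vpe);
}

static void vpe_mark_offline(unsigned int vpe)
{
	/* the kernel uses atomic_sub(), equivalent while the bit is set */
	atomic_fetch_and(&vpe_mask, ~(1u << vpe));
}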
static void boot_core(struct boot_config *cfg)
static void boot_core(unsigned core)
{
u32 access;
/* Select the appropriate core */
write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF);
write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
/* Set its reset vector */
write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
@@ -175,104 +193,74 @@ static void boot_core(struct boot_config *cfg)
/* Ensure the core can access the GCRs */
access = read_gcr_access();
access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core);
access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
write_gcr_access(access);
/* Copy cfg */
mips_cps_bootcfg = *cfg;
if (mips_cpc_present()) {
/* Select the appropriate core */
write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF);
/* Reset the core */
mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_RESET);
mips_cpc_unlock_other();
} else {
/* Take the core out of reset */
write_gcr_co_reset_release(0);
}
/* The core is now powered up */
bitmap_set(core_power, cfg->core, 1);
bitmap_set(core_power, core, 1);
}
static void boot_vpe(void *info)
static void remote_vpe_boot(void *dummy)
{
struct boot_config *cfg = info;
u32 tcstatus, vpeconf0;
/* Enter VPE configuration state */
dvpe();
set_c0_mvpcontrol(MVPCONTROL_VPC);
settc(cfg->vpe);
/* Set the TC restart PC */
write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
/* Activate the TC, allow interrupts */
tcstatus = read_tc_c0_tcstatus();
tcstatus &= ~TCSTATUS_IXMT;
tcstatus |= TCSTATUS_A;
write_tc_c0_tcstatus(tcstatus);
/* Clear the TC halt bit */
write_tc_c0_tchalt(0);
/* Activate the VPE */
vpeconf0 = read_vpe_c0_vpeconf0();
vpeconf0 |= VPECONF0_VPA;
write_vpe_c0_vpeconf0(vpeconf0);
/* Set the stack & global pointer registers */
write_tc_gpr_sp(cfg->sp);
write_tc_gpr_gp(cfg->gp);
/* Leave VPE configuration state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
/* Enable other VPEs to execute */
evpe(EVPE_ENABLE);
mips_cps_boot_vpes();
}
static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
struct boot_config cfg;
unsigned core = cpu_data[cpu].core;
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
unsigned int remote;
int err;
cfg.core = cpu_data[cpu].core;
cfg.vpe = cpu_vpe_id(&cpu_data[cpu]);
cfg.pc = (unsigned long)&smp_bootstrap;
cfg.sp = __KSTK_TOS(idle);
cfg.gp = (unsigned long)task_thread_info(idle);
vpe_cfg->pc = (unsigned long)&smp_bootstrap;
vpe_cfg->sp = __KSTK_TOS(idle);
vpe_cfg->gp = (unsigned long)task_thread_info(idle);
if (!test_bit(cfg.core, core_power)) {
atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
preempt_disable();
if (!test_bit(core, core_power)) {
/* Boot a VPE on a powered down core */
boot_core(&cfg);
return;
boot_core(core);
goto out;
}
if (cfg.core != current_cpu_data.core) {
if (core != current_cpu_data.core) {
/* Boot a VPE on another powered up core */
for (remote = 0; remote < NR_CPUS; remote++) {
if (cpu_data[remote].core != cfg.core)
if (cpu_data[remote].core != core)
continue;
if (cpu_online(remote))
break;
}
BUG_ON(remote >= NR_CPUS);
err = smp_call_function_single(remote, boot_vpe, &cfg, 1);
err = smp_call_function_single(remote, remote_vpe_boot,
NULL, 1);
if (err)
panic("Failed to call remote CPU\n");
return;
goto out;
}
BUG_ON(!cpu_has_mipsmt);
/* Boot a VPE on this core */
boot_vpe(&cfg);
mips_cps_boot_vpes();
out:
preempt_enable();
}
static void cps_init_secondary(void)
@@ -281,10 +269,6 @@ static void cps_init_secondary(void)
if (cpu_has_mipsmt)
dmt();
/* TODO: revisit this assumption once hotplug is implemented */
if (cpu_vpe_id(&current_cpu_data) == 0)
init_core();
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
STATUSF_IP6 | STATUSF_IP7);
}
@@ -302,10 +286,148 @@ static void cps_smp_finish(void)
local_irq_enable();
}
static void cps_cpus_done(void)
#ifdef CONFIG_HOTPLUG_CPU
static int cps_cpu_disable(void)
{
unsigned cpu = smp_processor_id();
struct core_boot_config *core_cfg;
if (!cpu)
return -EBUSY;
if (!cps_pm_support_state(CPS_PM_POWER_GATED))
return -EINVAL;
core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
smp_mb__after_atomic_dec();
set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
return 0;
}
static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
CPU_DEATH_HALT,
CPU_DEATH_POWER,
} cpu_death;
void play_dead(void)
{
unsigned cpu, core;
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
cpu_death = CPU_DEATH_POWER;
if (cpu_has_mipsmt) {
core = cpu_data[cpu].core;
/* Look for another online VPE within the core */
for_each_online_cpu(cpu_death_sibling) {
if (cpu_data[cpu_death_sibling].core != core)
continue;
/*
* There is an online VPE within the core. Just halt
* this TC and leave the core alone.
*/
cpu_death = CPU_DEATH_HALT;
break;
}
}
/* This CPU has chosen its way out */
complete(&cpu_death_chosen);
if (cpu_death == CPU_DEATH_HALT) {
/* Halt this TC */
write_c0_tchalt(TCHALT_H);
instruction_hazard();
} else {
/* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
}
/* This should never be reached */
panic("Failed to offline CPU %u", cpu);
}
static void wait_for_sibling_halt(void *ptr_cpu)
{
unsigned cpu = (unsigned)ptr_cpu;
unsigned vpe_id = cpu_data[cpu].vpe_id;
unsigned halted;
unsigned long flags;
do {
local_irq_save(flags);
settc(vpe_id);
halted = read_tc_c0_tchalt();
local_irq_restore(flags);
} while (!(halted & TCHALT_H));
}
static void cps_cpu_die(unsigned int cpu)
{
unsigned core = cpu_data[cpu].core;
unsigned stat;
int err;
/* Wait for the cpu to choose its way out */
if (!wait_for_completion_timeout(&cpu_death_chosen,
msecs_to_jiffies(5000))) {
pr_err("CPU%u: didn't offline\n", cpu);
return;
}
/*
* Now wait for the CPU to actually offline. Without this, the
* offlining may race with one or more of:
*
* - Onlining the CPU again.
* - Powering down the core if another VPE within it is offlined.
* - A sibling VPE entering a non-coherent state.
*
* In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
* with which we could race, so do nothing.
*/
if (cpu_death == CPU_DEATH_POWER) {
/*
* Wait for the core to enter a powered down or clock gated
* state, the latter happening when a JTAG probe is connected
* in which case the CPC will refuse to power down the core.
*/
do {
mips_cpc_lock_other(core);
stat = read_cpc_co_stat_conf();
stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
mips_cpc_unlock_other();
} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
/* Indicate the core is powered off */
bitmap_clear(core_power, core, 1);
} else if (cpu_has_mipsmt) {
/*
* Have a CPU with access to the offlined CPU's registers wait
* for its TC to halt.
*/
err = smp_call_function_single(cpu_death_sibling,
wait_for_sibling_halt,
(void *)cpu, 1);
if (err)
panic("Failed to call remote sibling CPU\n");
}
}
#endif /* CONFIG_HOTPLUG_CPU */
static struct plat_smp_ops cps_smp_ops = {
.smp_setup = cps_smp_setup,
.prepare_cpus = cps_prepare_cpus,
@@ -314,9 +436,18 @@ static struct plat_smp_ops cps_smp_ops = {
.smp_finish = cps_smp_finish,
.send_ipi_single = gic_send_ipi_single,
.send_ipi_mask = gic_send_ipi_mask,
.cpus_done = cps_cpus_done,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = cps_cpu_disable,
.cpu_die = cps_cpu_die,
#endif
};
bool mips_cps_smp_in_use(void)
{
extern struct plat_smp_ops *mp_ops;
return mp_ops == &cps_smp_ops;
}
int register_cps_smp_ops(void)
{
if (!mips_cm_present()) {

View file

@@ -15,12 +15,14 @@
#include <linux/printk.h>
#include <asm/gic.h>
#include <asm/mips-cpc.h>
#include <asm/smp-ops.h>
void gic_send_ipi_single(int cpu, unsigned int action)
{
unsigned long flags;
unsigned int intr;
unsigned int core = cpu_data[cpu].core;
pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
smp_processor_id(), __func__, cpu, action, read_c0_status());
@@ -41,6 +43,15 @@ void gic_send_ipi_single(int cpu, unsigned int action)
}
gic_send_ipi(intr);
if (mips_cpc_present() && (core != current_cpu_data.core)) {
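/*
 * The target CPU may have clock or power gated its core (see
 * pm-cps.c). Keep issuing power-up commands until the target has
 * marked itself coherent again and is able to take the IPI.
 */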
while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
mips_cpc_unlock_other();
}
}
local_irq_restore(flags);
}

View file

@@ -183,10 +183,6 @@ static void vsmp_smp_finish(void)
local_irq_enable();
}
static void vsmp_cpus_done(void)
{
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
@@ -287,7 +283,6 @@ struct plat_smp_ops vsmp_smp_ops = {
.send_ipi_mask = vsmp_send_ipi_mask,
.init_secondary = vsmp_init_secondary,
.smp_finish = vsmp_smp_finish,
.cpus_done = vsmp_cpus_done,
.boot_secondary = vsmp_boot_secondary,
.smp_setup = vsmp_smp_setup,
.prepare_cpus = vsmp_prepare_cpus,

View file

@@ -36,11 +36,6 @@ static void up_smp_finish(void)
{
}
/* Hook for after all CPUs are online */
static void up_cpus_done(void)
{
}
/*
* Firmware CPU startup hook
*/
@@ -73,7 +68,6 @@ struct plat_smp_ops up_smp_ops = {
.send_ipi_mask = up_send_ipi_mask,
.init_secondary = up_init_secondary,
.smp_finish = up_smp_finish,
.cpus_done = up_cpus_done,
.boot_secondary = up_boot_secondary,
.smp_setup = up_smp_setup,
.prepare_cpus = up_prepare_cpus,

View file

@@ -43,10 +43,6 @@
#include <asm/time.h>
#include <asm/setup.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
@@ -66,6 +62,8 @@ EXPORT_SYMBOL(cpu_sibling_map);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
cpumask_t cpu_coherent_mask;
static inline void set_cpu_sibling_map(int cpu)
{
int i;
@@ -102,12 +100,6 @@ asmlinkage void start_secondary(void)
{
unsigned int cpu;
#ifdef CONFIG_MIPS_MT_SMTC
/* Only do cpu_probe for first TC of CPU */
if ((read_c0_tcbind() & TCBIND_CURTC) != 0)
__cpu_name[smp_processor_id()] = __cpu_name[0];
else
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_probe();
cpu_report();
per_cpu_trap_init(false);
@@ -124,6 +116,7 @@ asmlinkage void start_secondary(void)
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
cpu_set(cpu, cpu_coherent_mask);
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
@@ -173,7 +166,6 @@ void smp_send_stop(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
mp_ops->cpus_done();
}
/* called from main before smp_init() */
@@ -186,6 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(cpu_possible_mask);
#endif
cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}
/* preload SMP state for boot cpu */
@@ -238,13 +231,10 @@ static void flush_tlb_mm_ipi(void *mm)
* o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared
* primary cache.
* o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
*/
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
smp_call_function(func, info, 1);
#endif
}
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
@@ -404,3 +394,46 @@ void dump_send_ipi(void (*dump_ipi_callback)(void *))
}
EXPORT_SYMBOL(dump_send_ipi);
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
void tick_broadcast(const struct cpumask *mask)
{
atomic_t *count;
struct call_single_data *csd;
int cpu;
for_each_cpu(cpu, mask) {
count = &per_cpu(tick_broadcast_count, cpu);
csd = &per_cpu(tick_broadcast_csd, cpu);
if (atomic_inc_return(count) == 1)
smp_call_function_single_async(cpu, csd);
}
}
static void tick_broadcast_callee(void *info)
{
int cpu = smp_processor_id();
tick_receive_broadcast();
atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}
static int __init tick_broadcast_init(void)
{
struct call_single_data *csd;
int cpu;
for (cpu = 0; cpu < NR_CPUS; cpu++) {
csd = &per_cpu(tick_broadcast_csd, cpu);
csd->func = tick_broadcast_callee;
}
return 0;
}
early_initcall(tick_broadcast_init);
#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
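The atomic_inc_return() test above is what keeps a still-pending call_single_data from being enqueued twice. A standalone C11 model of the guard (illustrative names; the callee's final store reopens the gate for the next round):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int pending;

/* sender side: only the 0 -> 1 transition enqueues the csd */
static bool should_enqueue(void)
{
	return atomic_fetch_add(&pending, 1) == 0;
}

/* callee side: after the broadcast is handled, allow a new round */
static void broadcast_handled(void)
{
	atomic_store(&pending, 0);
}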

View file

@@ -1,133 +0,0 @@
/*
* Assembly Language Functions for MIPS MT SMTC support
*/
/*
* This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */
#include <asm/regdef.h>
#include <asm/asmmacro.h>
#include <asm/stackframe.h>
#include <asm/irqflags.h>
/*
* "Software Interrupt" linkage.
*
* This is invoked when an "Interrupt" is sent from one TC to another,
* where the TC to be interrupted is halted, has its Restart address
* and Status values saved by the "remote control" thread, then modified
* to cause execution to begin here, in kernel mode. This code then
* disguises the TC state as that of an exception and transfers
* control to the general exception or vectored interrupt handler.
*/
.set noreorder
/*
The __smtc_ipi_vector would use k0 and k1 as temporaries and
1) Set EXL (this is per-VPE, so this can't be done by proxy!)
2) Restore the K/CU and IXMT bits to the pre "exception" state
(EXL means no interrupts and access to the kernel map).
3) Set EPC to be the saved value of TCRestart.
4) Jump to the exception handler entry point passed by the sender.
CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
*/
/*
* Reviled and slandered vision: Set EXL and restore K/CU/IXMT
* state of pre-halt thread, then save everything and call
* through some function pointer to imaginary_exception, which
* will parse a register value or memory message queue to
* deliver things like interprocessor interrupts. On return
* from that function, jump to the global ret_from_irq code
* to invoke the scheduler and return as appropriate.
*/
#define PT_PADSLOT4 (PT_R0-8)
#define PT_PADSLOT5 (PT_R0-4)
.text
.align 5
FEXPORT(__smtc_ipi_vector)
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
.set noat
/* Disable thread scheduling to make Status update atomic */
DMT 27 # dmt k1
_ehb
/* Set EXL */
mfc0 k0,CP0_STATUS
ori k0,k0,ST0_EXL
mtc0 k0,CP0_STATUS
_ehb
/* Thread scheduling now inhibited by EXL. Restore TE state. */
andi k1,k1,VPECONTROL_TE
beqz k1,1f
emt
1:
/*
* The IPI sender has put some information on the anticipated
* kernel stack frame. If we were in user mode, this will be
* built above the saved kernel SP. If we were already in the
* kernel, it will be built above the current CPU SP.
*
* Were we in kernel mode, as indicated by CU0?
*/
sll k1,k0,3
.set noreorder
bltz k1,2f
move k1,sp
.set reorder
/*
* If previously in user mode, set CU0 and use kernel stack.
*/
li k1,ST0_CU0
or k1,k1,k0
mtc0 k1,CP0_STATUS
_ehb
get_saved_sp
/* Interrupting TC will have pre-set values in slots in the new frame */
2: subu k1,k1,PT_SIZE
/* Load TCStatus Value */
lw k0,PT_TCSTATUS(k1)
/* Write it to TCStatus to restore CU/KSU/IXMT state */
mtc0 k0,$2,1
_ehb
lw k0,PT_EPC(k1)
mtc0 k0,CP0_EPC
/* Save all will redundantly recompute the SP, but use it for now */
SAVE_ALL
CLI
TRACE_IRQS_OFF
/* Function to be invoked passed stack pad slot 5 */
lw t0,PT_PADSLOT5(sp)
/* Argument from sender passed in stack pad slot 4 */
lw a0,PT_PADSLOT4(sp)
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq
jr t0
/*
* Called from idle loop to provoke processing of queued IPIs
* First IPI message in queue passed as argument.
*/
LEAF(self_ipi)
/* Before anything else, block interrupts */
mfc0 t0,CP0_TCSTATUS
ori t1,t0,TCSTATUS_IXMT
mtc0 t1,CP0_TCSTATUS
_ehb
/* We know we're in kernel mode, so prepare stack frame */
subu t1,sp,PT_SIZE
sw ra,PT_EPC(t1)
sw a0,PT_PADSLOT4(t1)
la t2,ipi_decode
sw t2,PT_PADSLOT5(t1)
/* Save pre-disable value of TCStatus */
sw t0,PT_TCSTATUS(t1)
j __smtc_ipi_vector
nop
END(self_ipi)

View file

@@ -1,102 +0,0 @@
/*
* /proc hooks for SMTC kernel
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/smtc_proc.h>
/*
* /proc diagnostic and statistics hooks
*/
/*
* Statistics gathered
*/
unsigned long selfipis[NR_CPUS];
struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
atomic_t smtc_fpu_recoveries;
static int smtc_proc_show(struct seq_file *m, void *v)
{
int i;
extern unsigned long ebase;
seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status);
seq_printf(m, "Config7: 0x%08x\n", read_c0_config7());
seq_printf(m, "EBASE: 0x%08lx\n", ebase);
seq_printf(m, "Counter Interrupts taken per CPU (TC)\n");
for (i=0; i < NR_CPUS; i++)
seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
seq_printf(m, "Self-IPIs by CPU:\n");
for(i = 0; i < NR_CPUS; i++)
seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
atomic_read(&smtc_fpu_recoveries));
return 0;
}
static int smtc_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, smtc_proc_show, NULL);
}
static const struct file_operations smtc_proc_fops = {
.open = smtc_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void init_smtc_stats(void)
{
int i;
for (i=0; i<NR_CPUS; i++) {
smtc_cpu_stats[i].timerints = 0;
smtc_cpu_stats[i].selfipis = 0;
}
atomic_set(&smtc_fpu_recoveries, 0);
proc_create("smtc", 0444, NULL, &smtc_proc_fops);
}
static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
unsigned long action_unused, void *data)
{
struct proc_cpuinfo_notifier_args *pcn = data;
struct seq_file *m = pcn->m;
unsigned long n = pcn->n;
if (!cpu_has_mipsmt)
return NOTIFY_OK;
seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
return NOTIFY_OK;
}
static int __init proc_cpuinfo_notifier_init(void)
{
return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
}
subsys_initcall(proc_cpuinfo_notifier_init);

Diff not shown: this file is too large.

View file

@@ -6,8 +6,6 @@
* not have done anything significant (but they may have had interrupts
* enabled briefly - prom_smp_finish() should not be responsible for enabling
* interrupts...)
*
* FIXME: broken for SMTC
*/
#include <linux/kernel.h>
@@ -33,14 +31,6 @@ void synchronise_count_master(int cpu)
unsigned long flags;
unsigned int initcount;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC needs to synchronise per VPE, not per CPU
* ignore for now
*/
return;
#endif
printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
local_irq_save(flags);
@@ -110,14 +100,6 @@ void synchronise_count_slave(int cpu)
int i;
unsigned int initcount;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC needs to synchronise per VPE, not per CPU
* ignore for now
*/
return;
#endif
/*
* Not every cpu is online at the time this gets called,
* so we first wait for the master to say everyone is ready

View file

@@ -26,7 +26,6 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
/*

View file

@@ -15,6 +15,7 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -370,9 +371,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long dvpret;
#endif /* CONFIG_MIPS_MT_SMTC */
oops_enter();
@@ -382,13 +380,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
console_verbose();
raw_spin_lock_irq(&die_lock);
#ifdef CONFIG_MIPS_MT_SMTC
dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
@@ -712,10 +704,12 @@ int process_fpemu_return(int sig, void __user *fault_addr)
si.si_addr = fault_addr;
si.si_signo = sig;
if (sig == SIGSEGV) {
down_read(&current->mm->mmap_sem);
if (find_vma(current->mm, (unsigned long)fault_addr))
si.si_code = SEGV_ACCERR;
else
si.si_code = SEGV_MAPERR;
up_read(&current->mm->mmap_sem);
} else {
si.si_code = BUS_ADRERR;
}
@@ -1759,19 +1753,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
extern char rollback_except_vec_vi;
char *vec_start = using_rollback_handler() ?
&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* We need to provide the SMTC vectored interrupt handler
* not only with the address of the handler, but with the
* Status.IM bit to be masked before going there.
*/
extern char except_vec_vi_mori;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
const int mori_offset = &except_vec_vi_mori - vec_start + 2;
#else
const int mori_offset = &except_vec_vi_mori - vec_start;
#endif
#endif /* CONFIG_MIPS_MT_SMTC */
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
const int lui_offset = &except_vec_vi_lui - vec_start + 2;
const int ori_offset = &except_vec_vi_ori - vec_start + 2;
@@ -1795,12 +1776,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
#else
handler_len);
#endif
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
h = (u16 *)(b + mori_offset);
*h = (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
h = (u16 *)(b + lui_offset);
*h = (handler >> 16) & 0xffff;
h = (u16 *)(b + ori_offset);
@@ -1865,32 +1840,16 @@ static int __init ulri_disable(char *s)
}
__setup("noulri", ulri_disable);
void per_cpu_trap_init(bool is_boot_cpu)
/* configure STATUS register */
static void configure_status(void)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
unsigned int hwrena = cpu_hwrena_impl_bits;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
/*
* Only do per_cpu_trap_init() for first TC of Each VPE.
* Note that this hack assumes that the SMTC init code
* assigns TCs consecutively and in ascending order.
*/
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */
/*
* Disable coprocessors and select 32-bit or 64-bit addressing
* and the 16/32 or 32/32 FPR register model. Reset the BEV
* flag that some firmware may have left set and the TS bit (for
* IP27). Set XX for ISA IV code to work.
*/
unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
@@ -1901,6 +1860,12 @@ void per_cpu_trap_init(bool is_boot_cpu)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
}
/* configure HWRENA register */
static void configure_hwrena(void)
{
unsigned int hwrena = cpu_hwrena_impl_bits;
if (cpu_has_mips_r2)
hwrena |= 0x0000000f;
@@ -1910,11 +1875,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
if (hwrena)
write_c0_hwrena(hwrena);
}
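With bits 0-3 of HWREna set as above on R2 cores, user code can read hardware registers directly via rdhwr, for example the cycle counter (hardware register 2). A sketch, assuming a toolchain that accepts MIPS32r2 assembly:

static inline unsigned int read_cycle_counter(void)
{
	unsigned int cc;

	__asm__ __volatile__(
	"	.set	push		\n"
	"	.set	mips32r2	\n"
	"	rdhwr	%0, $2		\n"	/* $2 = CC, cycle counter */
	"	.set	pop		\n"
	: "=r" (cc));
	return cc;
}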
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
static void configure_exception_vector(void)
{
if (cpu_has_veic || cpu_has_vint) {
unsigned long sr = set_c0_status(ST0_BEV);
write_c0_ebase(ebase);
@@ -1930,6 +1894,16 @@ void per_cpu_trap_init(bool is_boot_cpu)
} else
set_c0_cause(CAUSEF_IV);
}
}
void per_cpu_trap_init(bool is_boot_cpu)
{
unsigned int cpu = smp_processor_id();
configure_status();
configure_hwrena();
configure_exception_vector();
/*
* Before R2 both interrupt numbers were fixed to 7, so on R2 only:
@@ -1949,10 +1923,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
cp0_perfcount_irq = -1;
}
#ifdef CONFIG_MIPS_MT_SMTC
}
#endif /* CONFIG_MIPS_MT_SMTC */
if (!cpu_data[cpu].asid_cache)
cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
@@ -1961,23 +1931,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
#ifdef CONFIG_MIPS_MT_SMTC
if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
/* Boot CPU's cache setup in setup_arch(). */
if (!is_boot_cpu)
cpu_cache_init();
tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
} else if (!secondaryTC) {
/*
* First TC in non-boot VPE must do subset of tlb_init()
* for MMU control registers.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_wired(0);
}
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP();
}
@@ -2185,3 +2142,32 @@ void __init trap_init(void)
cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
}
static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
switch (cmd) {
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
configure_status();
configure_hwrena();
configure_exception_vector();
/* Restore register with CPU number for TLB handlers */
TLBMISS_HANDLER_RESTORE();
break;
}
return NOTIFY_OK;
}
static struct notifier_block trap_pm_notifier_block = {
.notifier_call = trap_pm_notifier,
};
static int __init trap_pm_init(void)
{
return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);

View file

@@ -127,9 +127,8 @@ int vpe_run(struct vpe *v)
clear_c0_mvpcontrol(MVPCONTROL_VPC);
/*
* SMTC/SMVP kernels manage VPE enable independently,
* but uniprocessor kernels need to turn it on, even
* if that wasn't the pre-dvpe() state.
* SMVP kernels manage VPE enable independently, but uniprocessor
* kernels need to turn it on, even if that wasn't the pre-dvpe() state.
*/
#ifdef CONFIG_SMP
evpe(vpeflags);
@@ -454,12 +453,11 @@ int __init vpe_module_init(void)
settc(tc);
/* Any TC that is bound to VPE0 gets left as is - in
* case we are running SMTC on VPE0. A TC that is bound
* to any other VPE gets bound to VPE0, ideally I'd like
* to make it homeless but it doesn't appear to let me
* bind a TC to a non-existent VPE. Which is perfectly
* reasonable.
/*
* A TC that is bound to any other VPE gets bound to
* VPE0, ideally I'd like to make it homeless but it
* doesn't appear to let me bind a TC to a non-existent
* VPE. Which is perfectly reasonable.
*
* The (un)bound state is visible to an EJTAG probe so
* may notify GDB...