sh: remove sh5 support

sh5 never became a product and has probably never really worked.

Remove it by recursively deleting all associated Kconfig options
and all corresponding files.

Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Rich Felker <dalias@libc.org>
Arnd Bergmann
2020-04-20 11:37:12 +02:00
committed by Rich Felker
parent d1f56f318d
commit 37744feebc
117 changed files with 67 additions and 11554 deletions


@@ -3,7 +3,7 @@
 # Makefile for the Linux/SuperH kernel.
 #
-extra-y := head_$(BITS).o vmlinux.lds
+extra-y := head_32.o vmlinux.lds
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
@@ -13,26 +13,26 @@ endif
 CFLAGS_REMOVE_return_address.o = -pg
 obj-y := debugtraps.o dumpstack.o \
-	idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
+	idle.o io.o irq.o irq_32.o kdebugfs.o \
 	machvec.o nmi_debug.o process.o \
-	process_$(BITS).o ptrace.o ptrace_$(BITS).o \
+	process_32.o ptrace.o ptrace_32.o \
 	reboot.o return_address.o \
-	setup.o signal_$(BITS).o sys_sh.o \
-	syscalls_$(BITS).o time.o topology.o traps.o \
-	traps_$(BITS).o unwinder.o
+	setup.o signal_32.o sys_sh.o \
+	syscalls_32.o time.o topology.o traps.o \
+	traps_32.o unwinder.o
 ifndef CONFIG_GENERIC_IOMAP
 obj-y += iomap.o
 obj-$(CONFIG_HAS_IOPORT_MAP) += ioport.o
 endif
-obj-$(CONFIG_SUPERH32) += sys_sh32.o
+obj-y += sys_sh32.o
 obj-y += cpu/
 obj-$(CONFIG_VSYSCALL) += vsyscall/
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
 obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
+obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
 obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o


@@ -7,7 +7,6 @@ obj-$(CONFIG_CPU_SH2) = sh2/
 obj-$(CONFIG_CPU_SH2A) = sh2a/
 obj-$(CONFIG_CPU_SH3) = sh3/
 obj-$(CONFIG_CPU_SH4) = sh4/
-obj-$(CONFIG_CPU_SH5) = sh5/
 # Special cases for family ancestry.


@@ -103,7 +103,7 @@ void __attribute__ ((weak)) l2_cache_init(void)
 /*
  * Generic first-level cache init
  */
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_CPU_J2)
+#if !defined(CONFIG_CPU_J2)
 static void cache_init(void)
 {
 	unsigned long ccr, flags;


@@ -2,6 +2,5 @@
 #
 # Makefile for the Linux/SuperH CPU-specific IRQ handlers.
 #
-obj-$(CONFIG_SUPERH32) += imask.o
-obj-$(CONFIG_CPU_SH5) += intc-sh5.o
+obj-y += imask.o
 obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o


@@ -1,194 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/irq/intc-sh5.c
*
* Interrupt Controller support for SH5 INTC.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
* Per-interrupt selective. IRLM=0 (Fixed priority) is not
* supported being useless without a cascaded interrupt
* controller.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <cpu/irq.h>
#include <asm/page.h>
/*
* Maybe the generic Peripheral block could move to a more
* generic include file. INTC Block will be defined here
* and only here to make INTC self-contained in a single
* file.
*/
#define INTC_BLOCK_OFFSET 0x01000000
/* Base */
#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \
INTC_BLOCK_OFFSET
/* Address */
#define INTC_ICR_SET (intc_virt + 0x0)
#define INTC_ICR_CLEAR (intc_virt + 0x8)
#define INTC_INTPRI_0 (intc_virt + 0x10)
#define INTC_INTSRC_0 (intc_virt + 0x50)
#define INTC_INTSRC_1 (intc_virt + 0x58)
#define INTC_INTREQ_0 (intc_virt + 0x60)
#define INTC_INTREQ_1 (intc_virt + 0x68)
#define INTC_INTENB_0 (intc_virt + 0x70)
#define INTC_INTENB_1 (intc_virt + 0x78)
#define INTC_INTDSB_0 (intc_virt + 0x80)
#define INTC_INTDSB_1 (intc_virt + 0x88)
#define INTC_ICR_IRLM 0x1
#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
/*
* Mapper between the vector ordinal and the IRQ number
* passed to kernel/device drivers.
*/
int intc_evt_to_irq[(0xE20/0x20)+1] = {
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
-1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
-1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
-1, -1 /* 0xE00 - 0xE20 */
};
static unsigned long intc_virt;
static int irlm; /* IRL mode */
static void enable_intc_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
printk("Trying to use straight IRL0-3 with an encoding platform.\n");
if (irq < 32) {
reg = INTC_INTENB_0;
bitmask = 1 << irq;
} else {
reg = INTC_INTENB_1;
bitmask = 1 << (irq - 32);
}
__raw_writel(bitmask, reg);
}
static void disable_intc_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
if (irq < 32) {
reg = INTC_INTDSB_0;
bitmask = 1 << irq;
} else {
reg = INTC_INTDSB_1;
bitmask = 1 << (irq - 32);
}
__raw_writel(bitmask, reg);
}
static struct irq_chip intc_irq_type = {
.name = "INTC",
.irq_enable = enable_intc_irq,
.irq_disable = disable_intc_irq,
};
void __init plat_irq_setup(void)
{
unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
unsigned long reg;
int i;
intc_virt = (unsigned long)ioremap(INTC_BASE, 1024);
if (!intc_virt) {
panic("Unable to remap INTC\n");
}
/* Set default: per-line enable/disable, priority driven ack/eoi */
for (i = 0; i < NR_INTC_IRQS; i++)
irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
/* Disable all interrupts and set all priorities to 0 to avoid trouble */
__raw_writel(-1, INTC_INTDSB_0);
__raw_writel(-1, INTC_INTDSB_1);
for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
__raw_writel( NO_PRIORITY, reg);
#ifdef CONFIG_SH_CAYMAN
{
unsigned long data;
/* Set IRLM */
/* If all the priorities are set to 'no priority', then
* assume we are using encoded mode.
*/
irlm = platform_int_priority[IRQ_IRL0] +
platform_int_priority[IRQ_IRL1] +
platform_int_priority[IRQ_IRL2] +
platform_int_priority[IRQ_IRL3];
if (irlm == NO_PRIORITY) {
/* IRLM = 0 */
reg = INTC_ICR_CLEAR;
i = IRQ_INTA;
printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
} else {
/* IRLM = 1 */
reg = INTC_ICR_SET;
i = IRQ_IRL0;
}
__raw_writel(INTC_ICR_IRLM, reg);
/* Set interrupt priorities according to platform description */
for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
data |= platform_int_priority[i] <<
((i % INTC_INTPRI_PPREG) * 4);
if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
/* Upon the 7th, set Priority Register */
__raw_writel(data, reg);
data = 0;
reg += 8;
}
}
}
#endif
/*
* And now let interrupts come in.
* sti() is not enough, we need to
* lower priority, too.
*/
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
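For reference, the intc_evt_to_irq[] table above is indexed by hardware event code in steps of 0x20, with -1 marking vectors that have no IRQ wired to them. A minimal sketch of the assumed lookup convention (evt_to_irq() is a hypothetical helper, not from this file):

static inline int evt_to_irq(unsigned int evt)
{
	/* event codes step by 0x20; -1 = no Linux IRQ at this vector */
	return intc_evt_to_irq[evt / 0x20];
}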


@@ -24,7 +24,6 @@ static const char *cpu_name[] = {
 	[CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
 	[CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
 	[CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
-	[CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
 	[CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
 	[CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
 	[CPU_SH7372] = "SH7372", [CPU_SH7734] = "SH7734",


@@ -1,16 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux/SuperH SH-5 backends.
#
obj-y := entry.o probe.o switchto.o
obj-$(CONFIG_SH_FPU) += fpu.o
obj-$(CONFIG_KALLSYMS) += unwind.o
# CPU subtype setup
obj-$(CONFIG_CPU_SH5) += setup-sh5.o
# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH5) := clock-sh5.o
obj-y += $(clock-y)


@@ -1,76 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/clock-sh5.c
*
* SH-5 support for the clock framework
*
* Copyright (C) 2008 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/io.h>
static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
/* Clock, Power and Reset Controller */
#define CPRC_BLOCK_OFF 0x01010000
#define CPRC_BASE (PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
static unsigned long cprc_base;
static void master_clk_init(struct clk *clk)
{
int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
clk->rate *= ifc_table[idx];
}
static struct sh_clk_ops sh5_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
return clk->parent->rate / ifc_table[idx];
}
static struct sh_clk_ops sh5_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
return clk->parent->rate / ifc_table[idx];
}
static struct sh_clk_ops sh5_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(cprc_base) & 0x0007);
return clk->parent->rate / ifc_table[idx];
}
static struct sh_clk_ops sh5_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh5_clk_ops[] = {
&sh5_master_clk_ops,
&sh5_module_clk_ops,
&sh5_bus_clk_ops,
&sh5_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
cprc_base = (unsigned long)ioremap(CPRC_BASE, 1024);
BUG_ON(!cprc_base);
if (idx < ARRAY_SIZE(sh5_clk_ops))
*ops = sh5_clk_ops[idx];
}
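For reference, a worked example of the divider/multiplier table above; the 50 MHz input is a hypothetical rate, not taken from this file:

#include <stdio.h>

static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };

int main(void)
{
	unsigned long parent = 50000000UL;             /* hypothetical 50 MHz input */
	unsigned long master = parent * ifc_table[3];  /* idx 3 -> x8 = 400 MHz */
	unsigned long bus    = master / ifc_table[3];  /* idx 3 -> /8 =  50 MHz */
	printf("master=%lu bus=%lu\n", master, bus);
	return 0;
}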

(Diff content not shown because it is too large.)


@@ -1,106 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/fpu.c
*
* Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
* Copyright (C) 2002 STMicroelectronics Limited
* Author : Stuart Menefy
*
* Started from SH4 version:
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*/
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/processor.h>
void save_fpu(struct task_struct *tsk)
{
asm volatile("fst.p %0, (0*8), fp0\n\t"
"fst.p %0, (1*8), fp2\n\t"
"fst.p %0, (2*8), fp4\n\t"
"fst.p %0, (3*8), fp6\n\t"
"fst.p %0, (4*8), fp8\n\t"
"fst.p %0, (5*8), fp10\n\t"
"fst.p %0, (6*8), fp12\n\t"
"fst.p %0, (7*8), fp14\n\t"
"fst.p %0, (8*8), fp16\n\t"
"fst.p %0, (9*8), fp18\n\t"
"fst.p %0, (10*8), fp20\n\t"
"fst.p %0, (11*8), fp22\n\t"
"fst.p %0, (12*8), fp24\n\t"
"fst.p %0, (13*8), fp26\n\t"
"fst.p %0, (14*8), fp28\n\t"
"fst.p %0, (15*8), fp30\n\t"
"fst.p %0, (16*8), fp32\n\t"
"fst.p %0, (17*8), fp34\n\t"
"fst.p %0, (18*8), fp36\n\t"
"fst.p %0, (19*8), fp38\n\t"
"fst.p %0, (20*8), fp40\n\t"
"fst.p %0, (21*8), fp42\n\t"
"fst.p %0, (22*8), fp44\n\t"
"fst.p %0, (23*8), fp46\n\t"
"fst.p %0, (24*8), fp48\n\t"
"fst.p %0, (25*8), fp50\n\t"
"fst.p %0, (26*8), fp52\n\t"
"fst.p %0, (27*8), fp54\n\t"
"fst.p %0, (28*8), fp56\n\t"
"fst.p %0, (29*8), fp58\n\t"
"fst.p %0, (30*8), fp60\n\t"
"fst.p %0, (31*8), fp62\n\t"
"fgetscr fr63\n\t"
"fst.s %0, (32*8), fr63\n\t"
: /* no output */
: "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
void restore_fpu(struct task_struct *tsk)
{
asm volatile("fld.p %0, (0*8), fp0\n\t"
"fld.p %0, (1*8), fp2\n\t"
"fld.p %0, (2*8), fp4\n\t"
"fld.p %0, (3*8), fp6\n\t"
"fld.p %0, (4*8), fp8\n\t"
"fld.p %0, (5*8), fp10\n\t"
"fld.p %0, (6*8), fp12\n\t"
"fld.p %0, (7*8), fp14\n\t"
"fld.p %0, (8*8), fp16\n\t"
"fld.p %0, (9*8), fp18\n\t"
"fld.p %0, (10*8), fp20\n\t"
"fld.p %0, (11*8), fp22\n\t"
"fld.p %0, (12*8), fp24\n\t"
"fld.p %0, (13*8), fp26\n\t"
"fld.p %0, (14*8), fp28\n\t"
"fld.p %0, (15*8), fp30\n\t"
"fld.p %0, (16*8), fp32\n\t"
"fld.p %0, (17*8), fp34\n\t"
"fld.p %0, (18*8), fp36\n\t"
"fld.p %0, (19*8), fp38\n\t"
"fld.p %0, (20*8), fp40\n\t"
"fld.p %0, (21*8), fp42\n\t"
"fld.p %0, (22*8), fp44\n\t"
"fld.p %0, (23*8), fp46\n\t"
"fld.p %0, (24*8), fp48\n\t"
"fld.p %0, (25*8), fp50\n\t"
"fld.p %0, (26*8), fp52\n\t"
"fld.p %0, (27*8), fp54\n\t"
"fld.p %0, (28*8), fp56\n\t"
"fld.p %0, (29*8), fp58\n\t"
"fld.p %0, (30*8), fp60\n\t"
"fld.s %0, (32*8), fr63\n\t"
"fputscr fr63\n\t"
"fld.p %0, (31*8), fp62\n\t"
: /* no output */
: "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
{
regs->pc += 4;
force_sig(SIGFPE);
}


@@ -1,72 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/probe.c
*
* CPU Subtype Probing for SH-5.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/tlb.h>
void cpu_probe(void)
{
unsigned long long cir;
/*
* Do peeks in real mode to avoid having to set up a mapping for
* the WPC registers. On SH5-101 cut2, such a mapping would be
* exposed to an address translation erratum which would make it
* hard to set up correctly.
*/
cir = peek_real_address_q(0x0d000008);
if ((cir & 0xffff) == 0x5103)
boot_cpu_data.type = CPU_SH5_103;
else if (((cir >> 32) & 0xffff) == 0x51e2)
/* CPU.VCR aliased at CIR address on SH5-101 */
boot_cpu_data.type = CPU_SH5_101;
boot_cpu_data.family = CPU_FAMILY_SH5;
/*
* First, setup some sane values for the I-cache.
*/
boot_cpu_data.icache.ways = 4;
boot_cpu_data.icache.sets = 256;
boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
boot_cpu_data.icache.way_incr = (1 << 13);
boot_cpu_data.icache.entry_shift = 5;
boot_cpu_data.icache.way_size = boot_cpu_data.icache.sets *
boot_cpu_data.icache.linesz;
boot_cpu_data.icache.entry_mask = 0x1fe0;
boot_cpu_data.icache.flags = 0;
/*
* Next, setup some sane values for the D-cache.
*
* On the SH5, these are pretty consistent with the I-cache settings,
* so we just copy over the existing definitions.. these can be fixed
* up later, especially if we add runtime CPU probing.
*
* Though in the meantime it saves us from having to duplicate all of
* the above definitions..
*/
boot_cpu_data.dcache = boot_cpu_data.icache;
/*
* Setup any cache-related flags here
*/
#if defined(CONFIG_CACHE_WRITETHROUGH)
set_bit(SH_CACHE_MODE_WT, &(boot_cpu_data.dcache.flags));
#elif defined(CONFIG_CACHE_WRITEBACK)
set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags));
#endif
/* Setup some I/D TLB defaults */
sh64_tlb_init();
}
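The geometry programmed above works out to an 8 KiB way and a 32 KiB cache, assuming L1_CACHE_BYTES is 32 (which entry_shift = 5 suggests); a quick arithmetic check:

#include <stdio.h>

int main(void)
{
	int ways = 4, sets = 256, linesz = 32;	/* linesz assumed = 1 << entry_shift */
	int way_size = sets * linesz;		/* 8192 bytes, matching way_incr = 1 << 13 */
	printf("way = %d bytes, total = %d KiB\n", way_size, ways * way_size / 1024);
	return 0;
}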


@@ -1,121 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SH5-101/SH5-103 CPU Setup
*
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sh_timer.h>
#include <asm/addrspace.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.flags = UPF_IOREMAP,
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(PHYS_PERIPHERAL_BLOCK + 0x01030000, 0x100),
DEFINE_RES_IRQ(39),
DEFINE_RES_IRQ(40),
DEFINE_RES_IRQ(42),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct resource rtc_resources[] = {
[0] = {
.start = PHYS_PERIPHERAL_BLOCK + 0x01040000,
.end = PHYS_PERIPHERAL_BLOCK + 0x01040000 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Period IRQ */
.start = IRQ_PRI,
.flags = IORESOURCE_IRQ,
},
[2] = {
/* Carry IRQ */
.start = IRQ_CUI,
.flags = IORESOURCE_IRQ,
},
[3] = {
/* Alarm IRQ */
.start = IRQ_ATI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
#define TMU_BLOCK_OFF 0x01020000
#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(TMU_BASE, 0x30),
DEFINE_RES_IRQ(IRQ_TUNI0),
DEFINE_RES_IRQ(IRQ_TUNI1),
DEFINE_RES_IRQ(IRQ_TUNI2),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh5_early_devices[] __initdata = {
&scif0_device,
&tmu0_device,
};
static struct platform_device *sh5_devices[] __initdata = {
&rtc_device,
};
static int __init sh5_devices_setup(void)
{
int ret;
ret = platform_add_devices(sh5_early_devices,
ARRAY_SIZE(sh5_early_devices));
if (unlikely(ret != 0))
return ret;
return platform_add_devices(sh5_devices,
ARRAY_SIZE(sh5_devices));
}
arch_initcall(sh5_devices_setup);
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh5_early_devices,
ARRAY_SIZE(sh5_early_devices));
}


@@ -1,195 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/cpu/sh5/switchto.S
*
* sh64 context switch
*
* Copyright (C) 2004 Richard Curnow
*/
.section .text..SHmedia32,"ax"
.little
.balign 32
.type sh64_switch_to,@function
.global sh64_switch_to
.global __sh64_switch_to_end
sh64_switch_to:
/* Incoming args
r2 - prev
r3 - &prev->thread
r4 - next
r5 - &next->thread
Outgoing results
r2 - last (=prev) : this just stays in r2 throughout
Want to create a full (struct pt_regs) on the stack to allow backtracing
functions to work. However, we only need to populate the callee-save
register slots in this structure; since we're a function our ancestors must
have themselves preserved all caller saved state in the stack. This saves
some wasted effort since we won't need to look at the values.
In particular, all caller-save registers are immediately available for
scratch use.
*/
#define FRAME_SIZE (76*8 + 8)
movi FRAME_SIZE, r0
sub.l r15, r0, r15
! Do normal-style register save to support backtrace
st.l r15, 0, r18 ! save link reg
st.l r15, 4, r14 ! save fp
add.l r15, r63, r14 ! setup frame pointer
! hopefully this looks normal to the backtrace now.
addi.l r15, 8, r1 ! base of pt_regs
addi.l r1, 24, r0 ! base of pt_regs.regs
addi.l r0, (63*8), r8 ! base of pt_regs.trregs
/* Note : to be fixed?
struct pt_regs is really designed for holding the state on entry
to an exception, i.e. pc,sr,regs etc. However, for the context
switch state, some of this is not required. But the unwinder takes
struct pt_regs * as an arg so we have to build this structure
to allow unwinding switched tasks in show_state() */
st.q r0, ( 9*8), r9
st.q r0, (10*8), r10
st.q r0, (11*8), r11
st.q r0, (12*8), r12
st.q r0, (13*8), r13
st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
! the point where the process is left in suspended animation, i.e. current
! fp here, not the saved one.
st.q r0, (16*8), r16
st.q r0, (24*8), r24
st.q r0, (25*8), r25
st.q r0, (26*8), r26
st.q r0, (27*8), r27
st.q r0, (28*8), r28
st.q r0, (29*8), r29
st.q r0, (30*8), r30
st.q r0, (31*8), r31
st.q r0, (32*8), r32
st.q r0, (33*8), r33
st.q r0, (34*8), r34
st.q r0, (35*8), r35
st.q r0, (44*8), r44
st.q r0, (45*8), r45
st.q r0, (46*8), r46
st.q r0, (47*8), r47
st.q r0, (48*8), r48
st.q r0, (49*8), r49
st.q r0, (50*8), r50
st.q r0, (51*8), r51
st.q r0, (52*8), r52
st.q r0, (53*8), r53
st.q r0, (54*8), r54
st.q r0, (55*8), r55
st.q r0, (56*8), r56
st.q r0, (57*8), r57
st.q r0, (58*8), r58
st.q r0, (59*8), r59
! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
! Use a local label to avoid creating a symbol that will confuse the !
! backtrace
pta .Lsave_pc, tr0
gettr tr5, r45
gettr tr6, r46
gettr tr7, r47
st.q r8, (5*8), r45
st.q r8, (6*8), r46
st.q r8, (7*8), r47
! Now switch context
gettr tr0, r9
st.l r3, 0, r15 ! prev->thread.sp
st.l r3, 8, r1 ! prev->thread.kregs
st.l r3, 4, r9 ! prev->thread.pc
st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
! Load PC for next task (init value or save_pc later)
ld.l r5, 4, r18 ! next->thread.pc
! Switch stacks
ld.l r5, 0, r15 ! next->thread.sp
ptabs r18, tr0
! Update current
ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
putcon r9, kcr0 ! current = next->thread_info
! go to save_pc for a reschedule, or the initial thread.pc for a new process
blink tr0, r63
! Restore (when we come back to a previously saved task)
.Lsave_pc:
addi.l r15, 32, r0 ! r0 = next's regs
addi.l r0, (63*8), r8 ! r8 = next's tr_regs
ld.q r8, (5*8), r45
ld.q r8, (6*8), r46
ld.q r8, (7*8), r47
ptabs r45, tr5
ptabs r46, tr6
ptabs r47, tr7
ld.q r0, ( 9*8), r9
ld.q r0, (10*8), r10
ld.q r0, (11*8), r11
ld.q r0, (12*8), r12
ld.q r0, (13*8), r13
ld.q r0, (14*8), r14
ld.q r0, (16*8), r16
ld.q r0, (24*8), r24
ld.q r0, (25*8), r25
ld.q r0, (26*8), r26
ld.q r0, (27*8), r27
ld.q r0, (28*8), r28
ld.q r0, (29*8), r29
ld.q r0, (30*8), r30
ld.q r0, (31*8), r31
ld.q r0, (32*8), r32
ld.q r0, (33*8), r33
ld.q r0, (34*8), r34
ld.q r0, (35*8), r35
ld.q r0, (44*8), r44
ld.q r0, (45*8), r45
ld.q r0, (46*8), r46
ld.q r0, (47*8), r47
ld.q r0, (48*8), r48
ld.q r0, (49*8), r49
ld.q r0, (50*8), r50
ld.q r0, (51*8), r51
ld.q r0, (52*8), r52
ld.q r0, (53*8), r53
ld.q r0, (54*8), r54
ld.q r0, (55*8), r55
ld.q r0, (56*8), r56
ld.q r0, (57*8), r57
ld.q r0, (58*8), r58
ld.q r0, (59*8), r59
! epilogue
ld.l r15, 0, r18
ld.l r15, 4, r14
ptabs r18, tr0
movi FRAME_SIZE, r0
add r15, r0, r15
blink tr0, r63
__sh64_switch_to_end:
.LFE1:
.size sh64_switch_to,.LFE1-sh64_switch_to


@@ -1,342 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/unwind.c
*
* Copyright (C) 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/unwinder.h>
#include <asm/stacktrace.h>
static u8 regcache[63];
/*
* Finding the previous stack frame isn't horribly straightforward as it is
* on some other platforms. In the sh64 case, we don't have "linked" stack
* frames, so we need to do a bit of work to determine the previous frame,
* and in turn, the previous r14/r18 pair.
*
* There are generally a few cases which determine where we can find out
* the r14/r18 values. In the general case, this can be determined by poking
* around the prologue of the symbol PC is in (note that we absolutely must
* have frame pointer support as well as the kernel symbol table mapped,
* otherwise we can't even get this far).
*
* In other cases, such as the interrupt/exception path, we can poke around
* the sp/fp.
*
* Notably, this entire approach is somewhat error prone, and in the event
* that the previous frame cannot be determined, that's all we can do.
* Either way, this still leaves us with a more correct backtrace then what
* we would be able to come up with by walking the stack (which is garbage
* for anything beyond the first frame).
* -- PFM.
*/
static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
unsigned long *pprev_fp, unsigned long *pprev_pc,
struct pt_regs *regs)
{
const char *sym;
char namebuf[128];
unsigned long offset;
unsigned long prologue = 0;
unsigned long fp_displacement = 0;
unsigned long fp_prev = 0;
unsigned long offset_r14 = 0, offset_r18 = 0;
int i, found_prologue_end = 0;
sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
if (!sym)
return -EINVAL;
prologue = pc - offset;
if (!prologue)
return -EINVAL;
/* Validate fp, to avoid risk of dereferencing a bad pointer later.
Assume 128Mb since that's the amount of RAM on a Cayman. Modify
when there is an SH-5 board with more. */
if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
(fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
((fp & 7) != 0)) {
return -EINVAL;
}
/*
* Depth to walk, depth is completely arbitrary.
*/
for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
unsigned long op;
u8 major, minor;
u8 src, dest, disp;
op = *(unsigned long *)prologue;
major = (op >> 26) & 0x3f;
src = (op >> 20) & 0x3f;
minor = (op >> 16) & 0xf;
disp = (op >> 10) & 0x3f;
dest = (op >> 4) & 0x3f;
/*
* Stack frame creation happens in a number of ways.. in the
* general case when the stack frame is less than 511 bytes,
* it's generally created by an addi or addi.l:
*
* addi/addi.l r15, -FRAME_SIZE, r15
*
* in the event that the frame size is bigger than this, it's
* typically created using a movi/sub pair as follows:
*
* movi FRAME_SIZE, rX
* sub r15, rX, r15
*/
switch (major) {
case (0x00 >> 2):
switch (minor) {
case 0x8: /* add.l */
case 0x9: /* add */
/* Look for r15, r63, r14 */
if (src == 15 && disp == 63 && dest == 14)
found_prologue_end = 1;
break;
case 0xa: /* sub.l */
case 0xb: /* sub */
if (src != 15 || dest != 15)
continue;
fp_displacement -= regcache[disp];
fp_prev = fp - fp_displacement;
break;
}
break;
case (0xa8 >> 2): /* st.l */
if (src != 15)
continue;
switch (dest) {
case 14:
if (offset_r14 || fp_displacement == 0)
continue;
offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
offset_r14 *= sizeof(unsigned long);
offset_r14 += fp_displacement;
break;
case 18:
if (offset_r18 || fp_displacement == 0)
continue;
offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
offset_r18 *= sizeof(unsigned long);
offset_r18 += fp_displacement;
break;
}
break;
case (0xcc >> 2): /* movi */
if (dest >= 63) {
printk(KERN_NOTICE "%s: Invalid dest reg %d "
"specified in movi handler. Failed "
"opcode was 0x%lx: ", __func__,
dest, op);
continue;
}
/* Sign extend */
regcache[dest] =
sign_extend64((((u64)op >> 10) & 0xffff), 9);
break;
case (0xd0 >> 2): /* addi */
case (0xd4 >> 2): /* addi.l */
/* Look for r15, -FRAME_SIZE, r15 */
if (src != 15 || dest != 15)
continue;
/* Sign extended frame size.. */
fp_displacement +=
(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
fp_prev = fp - fp_displacement;
break;
}
if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
break;
}
if (offset_r14 == 0 || fp_prev == 0) {
if (!offset_r14)
pr_debug("Unable to find r14 offset\n");
if (!fp_prev)
pr_debug("Unable to find previous fp\n");
return -EINVAL;
}
/* For innermost leaf function, there might not be a offset_r18 */
if (!*pprev_pc && (offset_r18 == 0))
return -EINVAL;
*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
if (offset_r18)
*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
*pprev_pc &= ~1;
return 0;
}
/*
* Don't put this on the stack since we'll want to call in to
* sh64_unwinder_dump() when we're close to underflowing the stack
* anyway.
*/
static struct pt_regs here_regs;
extern const char syscall_ret;
extern const char ret_from_syscall;
extern const char ret_from_exception;
extern const char ret_from_irq;
static void sh64_unwind_inner(const struct stacktrace_ops *ops,
void *data, struct pt_regs *regs);
static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
unsigned long pc, unsigned long fp)
{
if ((fp >= __MEMORY_START) &&
((fp & 7) == 0))
sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
}
static void sh64_unwind_inner(const struct stacktrace_ops *ops,
void *data, struct pt_regs *regs)
{
unsigned long pc, fp;
int ofs = 0;
int first_pass;
pc = regs->pc & ~1;
fp = regs->regs[14];
first_pass = 1;
for (;;) {
int cond;
unsigned long next_fp, next_pc;
if (pc == ((unsigned long)&syscall_ret & ~1)) {
printk("SYSCALL\n");
unwind_nested(ops, data, pc, fp);
return;
}
if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
printk("SYSCALL (PREEMPTED)\n");
unwind_nested(ops, data, pc, fp);
return;
}
/* In this case, the PC is discovered by lookup_prev_stack_frame but
it has 4 taken off it to look like the 'caller' */
if (pc == ((unsigned long)&ret_from_exception & ~1)) {
printk("EXCEPTION\n");
unwind_nested(ops, data, pc, fp);
return;
}
if (pc == ((unsigned long)&ret_from_irq & ~1)) {
printk("IRQ\n");
unwind_nested(ops, data, pc, fp);
return;
}
cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
((pc & 3) == 0) && ((fp & 7) == 0));
pc -= ofs;
ops->address(data, pc, 1);
if (first_pass) {
/* If the innermost frame is a leaf function, it's
* possible that r18 is never saved out to the stack.
*/
next_pc = regs->regs[18];
} else {
next_pc = 0;
}
if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
ofs = sizeof(unsigned long);
pc = next_pc & ~1;
fp = next_fp;
} else {
printk("Unable to lookup previous stack frame\n");
break;
}
first_pass = 0;
}
printk("\n");
}
static void sh64_unwinder_dump(struct task_struct *task,
struct pt_regs *regs,
unsigned long *sp,
const struct stacktrace_ops *ops,
void *data)
{
if (!regs) {
/*
* Fetch current regs if we have no other saved state to back
* trace from.
*/
regs = &here_regs;
__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
__asm__ __volatile__ (
"pta 0f, tr0\n\t"
"blink tr0, %0\n\t"
"0: nop"
: "=r" (regs->pc)
);
}
sh64_unwind_inner(ops, data, regs);
}
static struct unwinder sh64_unwinder = {
.name = "sh64-unwinder",
.dump = sh64_unwinder_dump,
.rating = 150,
};
static int __init sh64_unwinder_init(void)
{
return unwinder_register(&sh64_unwinder);
}
early_initcall(sh64_unwinder_init);
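A note on the recurring expression (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54) above: it extracts the 10-bit displacement field starting at bit 10 and sign-extends it. An equivalent, clearer sketch (sext10() is a hypothetical helper; assumes 64-bit long, mirroring the original's s64 shift trick):

static inline long sext10(unsigned long op)
{
	long d = (op >> 10) & 0x3ff;	/* 10-bit field at bit 10 */
	return (d << 54) >> 54;		/* arithmetic right shift replicates bit 9 as the sign */
}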


@@ -1,346 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/head_64.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*/
#include <linux/init.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <cpu/registers.h>
#include <cpu/mmu_context.h>
#include <asm/thread_info.h>
/*
* MMU defines: TLB boundaries.
*/
#define MMUIR_FIRST ITLB_FIXED
#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUIR_STEP TLB_STEP
#define MMUDR_FIRST DTLB_FIXED
#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUDR_STEP TLB_STEP
/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
#endif
/*
* MMU defines: Fixed TLBs.
*/
/* Deal safely with the case where the base of RAM is not 512Mb aligned */
#define ALIGN_512M_MASK (0xffffffffe0000000)
#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
#ifdef CONFIG_CACHE_OFF
#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
#else
#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
#endif
#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
#if defined (CONFIG_CACHE_OFF)
#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
#elif defined (CONFIG_CACHE_WRITETHROUGH)
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
/* WT, invalidate */
#elif defined (CONFIG_CACHE_WRITEBACK)
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
/* WB, invalidate */
#else
#error preprocessor flag CONFIG_CACHE_... not recognized!
#endif
#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
.section .empty_zero_page, "aw"
.global empty_zero_page
empty_zero_page:
.long 1 /* MOUNT_ROOT_RDONLY */
.long 0 /* RAMDISK_FLAGS */
.long 0x0200 /* ORIG_ROOT_DEV */
.long 1 /* LOADER_TYPE */
.long 0x00800000 /* INITRD_START */
.long 0x00800000 /* INITRD_SIZE */
.long 0
.text
.balign 4096,0,4096
.section .data, "aw"
.balign PAGE_SIZE
.section .data, "aw"
.balign PAGE_SIZE
.global mmu_pdtp_cache
mmu_pdtp_cache:
.space PAGE_SIZE, 0
.global fpu_in_use
fpu_in_use: .quad 0
__HEAD
.balign L1_CACHE_BYTES
/*
* Condition at the entry of __stext:
* . Reset state:
* . SR.FD = 1 (FPU disabled)
* . SR.BL = 1 (Exceptions disabled)
* . SR.MD = 1 (Privileged Mode)
* . SR.MMU = 0 (MMU Disabled)
* . SR.CD = 0 (CTC User Visible)
* . SR.IMASK = Undefined (Interrupt Mask)
*
* Operations supposed to be performed by __stext:
* . prevent speculative fetch onto device memory while MMU is off
* . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
* . first, save CPU state and set it to something harmless
* . any CPU detection and/or endianness settings (?)
* . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
* . set initial TLB entries for cached and uncached regions
* (no fine granularity paging)
* . set initial cache state
* . enable MMU and caches
* . set CPU to a consistent state
* . registers (including stack pointer and current/KCR0)
* . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
* at this stage. This is all to later Linux initialization steps.
* . initialize FPU
* . clear BSS
* . jump into start_kernel()
* . be prepared to hopeless start_kernel() returns.
*
*/
.global _stext
_stext:
/*
* Prevent speculative fetch on device memory due to
* uninitialized target registers.
*/
ptabs/u ZERO, tr0
ptabs/u ZERO, tr1
ptabs/u ZERO, tr2
ptabs/u ZERO, tr3
ptabs/u ZERO, tr4
ptabs/u ZERO, tr5
ptabs/u ZERO, tr6
ptabs/u ZERO, tr7
synci
/*
* Read/Set CPU state. After this block:
* r29 = Initial SR
*/
getcon SR, r29
movi SR_HARMLESS, r20
putcon r20, SR
/*
* Initialize EMI/LMI. To Be Done.
*/
/*
* CPU detection and/or endianness settings (?). To Be Done.
* Pure PIC code here, please ! Just save state into r30.
* After this block:
* r30 = CPU type/Platform Endianness
*/
/*
* Set initial TLB entries for cached and uncached regions.
* Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
*/
/* Clear ITLBs */
pta clear_ITLB, tr1
movi MMUIR_FIRST, r21
movi MMUIR_END, r22
clear_ITLB:
putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
addi r21, MMUIR_STEP, r21
bne r21, r22, tr1
/* Clear DTLBs */
pta clear_DTLB, tr1
movi MMUDR_FIRST, r21
movi MMUDR_END, r22
clear_DTLB:
putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
addi r21, MMUDR_STEP, r21
bne r21, r22, tr1
/* Map one big (512Mb) page for ITLB */
movi MMUIR_FIRST, r21
movi MMUIR_TEXT_L, r22 /* PTEL first */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
movi MMUIR_TEXT_H, r22 /* PTEH last */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
/* Map one big CACHED (512Mb) page for DTLB */
movi MMUDR_FIRST, r21
movi MMUDR_CACHED_L, r22 /* PTEL first */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
movi MMUDR_CACHED_H, r22 /* PTEH last */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
/*
* Setup a DTLB translation for SCIF phys.
*/
addi r21, MMUDR_STEP, r21
movi 0x0a03, r22 /* SCIF phys */
shori 0x0148, r22
putcfg r21, 1, r22 /* PTEL first */
movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
shori 0x0003, r22
putcfg r21, 0, r22 /* PTEH last */
/*
* Set cache behaviours.
*/
/* ICache */
movi ICCR_BASE, r21
movi ICCR0_INIT_VAL, r22
movi ICCR1_INIT_VAL, r23
putcfg r21, ICCR_REG0, r22
putcfg r21, ICCR_REG1, r23
/* OCache */
movi OCCR_BASE, r21
movi OCCR0_INIT_VAL, r22
movi OCCR1_INIT_VAL, r23
putcfg r21, OCCR_REG0, r22
putcfg r21, OCCR_REG1, r23
/*
* Enable Caches and MMU. Do the first non-PIC jump.
* Now head.S global variables, constants and externs
* can be used.
*/
getcon SR, r21
movi SR_ENABLE_MMU, r22
or r21, r22, r21
putcon r21, SSR
movi hyperspace, r22
ori r22, 1, r22 /* Make it SHmedia, not required but..*/
putcon r22, SPC
synco
rte /* And now go into the hyperspace ... */
hyperspace: /* ... that's the next instruction ! */
/*
* Set CPU to a consistent state.
* r31 = FPU support flag
* tr0/tr7 in use. Others give a chance to loop somewhere safe
*/
movi start_kernel, r32
ori r32, 1, r32
ptabs r32, tr0 /* r32 = _start_kernel address */
pta/u hopeless, tr1
pta/u hopeless, tr2
pta/u hopeless, tr3
pta/u hopeless, tr4
pta/u hopeless, tr5
pta/u hopeless, tr6
pta/u hopeless, tr7
gettr tr1, r28 /* r28 = hopeless address */
/* Set initial stack pointer */
movi init_thread_union, SP
putcon SP, KCR0 /* Set current to init_task */
movi THREAD_SIZE, r22 /* Point to the end */
add SP, r22, SP
/*
* Initialize FPU.
* Keep FPU flag in r31. After this block:
* r31 = FPU flag
*/
movi fpu_in_use, r31 /* Temporary */
#ifdef CONFIG_SH_FPU
getcon SR, r21
movi SR_ENABLE_FPU, r22
and r21, r22, r22
putcon r22, SR /* Try to enable */
getcon SR, r22
xor r21, r22, r21
shlri r21, 15, r21 /* Supposedly 0/1 */
st.q r31, 0 , r21 /* Set fpu_in_use */
#else
movi 0, r21
st.q r31, 0 , r21 /* Set fpu_in_use */
#endif
or r21, ZERO, r31 /* Set FPU flag at last */
#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
remote memory via SHdebug link, etc. For these the memory can be guaranteed
to be all zero on boot anyway. */
/*
* Clear bss
*/
pta clear_quad, tr1
movi __bss_start, r22
movi _end, r23
clear_quad:
st.q r22, 0, ZERO
addi r22, 8, r22
bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
#endif
pta/u hopeless, tr1
/* Say bye to head.S but be prepared to wrongly get back ... */
blink tr0, LINK
/* If we ever get back here through LINK/tr1-tr7 */
pta/u hopeless, tr7
hopeless:
/*
* Something's badly wrong here. Loop endlessly,
* there's nothing more we can do about it.
*
* Note on hopeless: it can be jumped into invariably
* before or after jumping into hyperspace. The only
* requirement is to be PIC called (PTA) before and
* any way (PTA/PTABS) after. According to Virtual
* to Physical mapping a simulator/emulator can easily
* tell where we came here from just looking at hopeless
* (PC) address.
*
* For debugging purposes:
* (r28) hopeless/loop address
* (r29) Original SR
* (r30) CPU type/Platform endianness
* (r31) FPU Support
* (r32) _start_kernel address
*/
blink tr7, ZERO


@@ -1,48 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SHmedia irqflags support
*
* Copyright (C) 2006 - 2009 Paul Mundt
*/
#include <linux/irqflags.h>
#include <linux/module.h>
#include <cpu/registers.h>
void notrace arch_local_irq_restore(unsigned long flags)
{
unsigned long long __dummy;
if (flags == ARCH_IRQ_DISABLED) {
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (ARCH_IRQ_DISABLED)
);
} else {
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (~ARCH_IRQ_DISABLED)
);
}
}
EXPORT_SYMBOL(arch_local_irq_restore);
unsigned long notrace arch_local_save_flags(void)
{
unsigned long flags;
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"and %0, %1, %0"
: "=&r" (flags)
: "r" (ARCH_IRQ_DISABLED)
);
return flags;
}
EXPORT_SYMBOL(arch_local_save_flags);
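These two helpers sit behind the kernel's generic irqflags API, treating the IMASK bits in SR as the disable state. A typical usage pattern, shown only for context (generic kernel convention, not code from this file):

unsigned long flags;

flags = arch_local_save_flags();            /* snapshot current IMASK state */
arch_local_irq_restore(ARCH_IRQ_DISABLED);  /* OR the mask bits in: IRQs off */
/* ... critical section ... */
arch_local_irq_restore(flags);              /* put the previous state back */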


@@ -46,15 +46,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 			+ ELF32_R_SYM(rel[i].r_info);
 		relocation = sym->st_value + rel[i].r_addend;
-#ifdef CONFIG_SUPERH64
-		/* For text addresses, bit2 of the st_other field indicates
-		 * whether the symbol is SHmedia (1) or SHcompact (0). If
-		 * SHmedia, the LSB of the symbol needs to be asserted
-		 * for the CPU to be in SHmedia mode when it starts executing
-		 * the branch target. */
-		relocation |= !!(sym->st_other & 4);
-#endif
 		switch (ELF32_R_TYPE(rel[i].r_info)) {
 		case R_SH_NONE:
 			break;
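The removed block encodes the SHmedia/SHcompact convention its comment describes; as a standalone sketch (fixup_shmedia() is a hypothetical name, Elf32_Addr as in the kernel's ELF headers):

static Elf32_Addr fixup_shmedia(Elf32_Addr relocation, unsigned char st_other)
{
	/* bit 2 of st_other set => SHmedia symbol, so assert bit 0 of the
	 * branch target to start execution in SHmedia mode */
	return relocation | !!(st_other & 4);
}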


@@ -23,9 +23,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
  */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-#ifdef CONFIG_SUPERH32
 	unlazy_fpu(src, task_pt_regs(src));
-#endif
 	*dst = *src;
 	if (src->thread.xstate) {


@@ -1,461 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/process_64.c
*
* This file handles the architecture-dependent parts of process handling..
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* Started from SH3/4 version:
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*
* In turn started from i386 version:
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/switch_to.h>
struct task_struct *last_task_used_math = NULL;
struct pt_regs fake_swapper_regs = { 0, };
void show_regs(struct pt_regs *regs)
{
unsigned long long ah, al, bh, bl, ch, cl;
printk("\n");
show_regs_print_info(KERN_DEFAULT);
ah = (regs->pc) >> 32;
al = (regs->pc) & 0xffffffff;
bh = (regs->regs[18]) >> 32;
bl = (regs->regs[18]) & 0xffffffff;
ch = (regs->regs[15]) >> 32;
cl = (regs->regs[15]) & 0xffffffff;
printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->sr) >> 32;
al = (regs->sr) & 0xffffffff;
asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
bh = (bh) >> 32;
bl = (bl) & 0xffffffff;
asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
ch = (ch) >> 32;
cl = (cl) & 0xffffffff;
printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[0]) >> 32;
al = (regs->regs[0]) & 0xffffffff;
bh = (regs->regs[1]) >> 32;
bl = (regs->regs[1]) & 0xffffffff;
ch = (regs->regs[2]) >> 32;
cl = (regs->regs[2]) & 0xffffffff;
printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[3]) >> 32;
al = (regs->regs[3]) & 0xffffffff;
bh = (regs->regs[4]) >> 32;
bl = (regs->regs[4]) & 0xffffffff;
ch = (regs->regs[5]) >> 32;
cl = (regs->regs[5]) & 0xffffffff;
printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[6]) >> 32;
al = (regs->regs[6]) & 0xffffffff;
bh = (regs->regs[7]) >> 32;
bl = (regs->regs[7]) & 0xffffffff;
ch = (regs->regs[8]) >> 32;
cl = (regs->regs[8]) & 0xffffffff;
printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[9]) >> 32;
al = (regs->regs[9]) & 0xffffffff;
bh = (regs->regs[10]) >> 32;
bl = (regs->regs[10]) & 0xffffffff;
ch = (regs->regs[11]) >> 32;
cl = (regs->regs[11]) & 0xffffffff;
printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[12]) >> 32;
al = (regs->regs[12]) & 0xffffffff;
bh = (regs->regs[13]) >> 32;
bl = (regs->regs[13]) & 0xffffffff;
ch = (regs->regs[14]) >> 32;
cl = (regs->regs[14]) & 0xffffffff;
printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[16]) >> 32;
al = (regs->regs[16]) & 0xffffffff;
bh = (regs->regs[17]) >> 32;
bl = (regs->regs[17]) & 0xffffffff;
ch = (regs->regs[19]) >> 32;
cl = (regs->regs[19]) & 0xffffffff;
printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[20]) >> 32;
al = (regs->regs[20]) & 0xffffffff;
bh = (regs->regs[21]) >> 32;
bl = (regs->regs[21]) & 0xffffffff;
ch = (regs->regs[22]) >> 32;
cl = (regs->regs[22]) & 0xffffffff;
printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[23]) >> 32;
al = (regs->regs[23]) & 0xffffffff;
bh = (regs->regs[24]) >> 32;
bl = (regs->regs[24]) & 0xffffffff;
ch = (regs->regs[25]) >> 32;
cl = (regs->regs[25]) & 0xffffffff;
printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[26]) >> 32;
al = (regs->regs[26]) & 0xffffffff;
bh = (regs->regs[27]) >> 32;
bl = (regs->regs[27]) & 0xffffffff;
ch = (regs->regs[28]) >> 32;
cl = (regs->regs[28]) & 0xffffffff;
printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[29]) >> 32;
al = (regs->regs[29]) & 0xffffffff;
bh = (regs->regs[30]) >> 32;
bl = (regs->regs[30]) & 0xffffffff;
ch = (regs->regs[31]) >> 32;
cl = (regs->regs[31]) & 0xffffffff;
printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[32]) >> 32;
al = (regs->regs[32]) & 0xffffffff;
bh = (regs->regs[33]) >> 32;
bl = (regs->regs[33]) & 0xffffffff;
ch = (regs->regs[34]) >> 32;
cl = (regs->regs[34]) & 0xffffffff;
printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[35]) >> 32;
al = (regs->regs[35]) & 0xffffffff;
bh = (regs->regs[36]) >> 32;
bl = (regs->regs[36]) & 0xffffffff;
ch = (regs->regs[37]) >> 32;
cl = (regs->regs[37]) & 0xffffffff;
printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[38]) >> 32;
al = (regs->regs[38]) & 0xffffffff;
bh = (regs->regs[39]) >> 32;
bl = (regs->regs[39]) & 0xffffffff;
ch = (regs->regs[40]) >> 32;
cl = (regs->regs[40]) & 0xffffffff;
printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[41]) >> 32;
al = (regs->regs[41]) & 0xffffffff;
bh = (regs->regs[42]) >> 32;
bl = (regs->regs[42]) & 0xffffffff;
ch = (regs->regs[43]) >> 32;
cl = (regs->regs[43]) & 0xffffffff;
printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[44]) >> 32;
al = (regs->regs[44]) & 0xffffffff;
bh = (regs->regs[45]) >> 32;
bl = (regs->regs[45]) & 0xffffffff;
ch = (regs->regs[46]) >> 32;
cl = (regs->regs[46]) & 0xffffffff;
printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[47]) >> 32;
al = (regs->regs[47]) & 0xffffffff;
bh = (regs->regs[48]) >> 32;
bl = (regs->regs[48]) & 0xffffffff;
ch = (regs->regs[49]) >> 32;
cl = (regs->regs[49]) & 0xffffffff;
printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[50]) >> 32;
al = (regs->regs[50]) & 0xffffffff;
bh = (regs->regs[51]) >> 32;
bl = (regs->regs[51]) & 0xffffffff;
ch = (regs->regs[52]) >> 32;
cl = (regs->regs[52]) & 0xffffffff;
printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[53]) >> 32;
al = (regs->regs[53]) & 0xffffffff;
bh = (regs->regs[54]) >> 32;
bl = (regs->regs[54]) & 0xffffffff;
ch = (regs->regs[55]) >> 32;
cl = (regs->regs[55]) & 0xffffffff;
printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[56]) >> 32;
al = (regs->regs[56]) & 0xffffffff;
bh = (regs->regs[57]) >> 32;
bl = (regs->regs[57]) & 0xffffffff;
ch = (regs->regs[58]) >> 32;
cl = (regs->regs[58]) & 0xffffffff;
printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[59]) >> 32;
al = (regs->regs[59]) & 0xffffffff;
bh = (regs->regs[60]) >> 32;
bl = (regs->regs[60]) & 0xffffffff;
ch = (regs->regs[61]) >> 32;
cl = (regs->regs[61]) & 0xffffffff;
printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[62]) >> 32;
al = (regs->regs[62]) & 0xffffffff;
bh = (regs->tregs[0]) >> 32;
bl = (regs->tregs[0]) & 0xffffffff;
ch = (regs->tregs[1]) >> 32;
cl = (regs->tregs[1]) & 0xffffffff;
printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->tregs[2]) >> 32;
al = (regs->tregs[2]) & 0xffffffff;
bh = (regs->tregs[3]) >> 32;
bl = (regs->tregs[3]) & 0xffffffff;
ch = (regs->tregs[4]) >> 32;
cl = (regs->tregs[4]) & 0xffffffff;
printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->tregs[5]) >> 32;
al = (regs->tregs[5]) & 0xffffffff;
bh = (regs->tregs[6]) >> 32;
bl = (regs->tregs[6]) & 0xffffffff;
ch = (regs->tregs[7]) >> 32;
cl = (regs->tregs[7]) & 0xffffffff;
printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
/*
* If we're in kernel mode, dump the stack too..
*/
if (!user_mode(regs)) {
void show_stack(struct task_struct *tsk, unsigned long *sp);
unsigned long sp = regs->regs[15] & 0xffffffff;
struct task_struct *tsk = get_current();
tsk->thread.kregs = regs;
show_stack(tsk, (unsigned long *)sp);
}
}
/*
* Free current thread data structures etc..
*/
void exit_thread(struct task_struct *tsk)
{
/*
* See arch/sparc/kernel/process.c for the precedent for doing
* this -- RPC.
*
* The SH-5 FPU save/restore approach relies on
* last_task_used_math pointing to a live task_struct. When
* another task tries to use the FPU for the 1st time, the FPUDIS
* trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
* existing FPU state to the FP regs field within
* last_task_used_math before re-loading the new task's FPU state
* (or initialising it if the FPU has been used before). So if
* last_task_used_math is stale, and its page has already been
* re-allocated for another use, the consequences are rather
* grim. Unless we null it here, there is no other path through
* which it would get safely nulled.
*/
#ifdef CONFIG_SH_FPU
if (last_task_used_math == tsk)
last_task_used_math = NULL;
#endif
}
void flush_thread(void)
{
/* Called by fs/exec.c (setup_new_exec) to remove traces of a
* previously running executable. */
#ifdef CONFIG_SH_FPU
if (last_task_used_math == current) {
last_task_used_math = NULL;
}
/* Force FPU state to be reinitialised after exec */
clear_used_math();
#endif
/* if we are a kernel thread, about to change to user thread,
* update kreg
*/
if(current->thread.kregs==&fake_swapper_regs) {
current->thread.kregs =
((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
current->thread.uregs = current->thread.kregs;
}
}
void release_thread(struct task_struct *dead_task)
{
/* do nothing */
}
/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
#ifdef CONFIG_SH_FPU
int fpvalid;
struct task_struct *tsk = current;
fpvalid = !!tsk_used_math(tsk);
if (fpvalid) {
if (current == last_task_used_math) {
enable_fpu();
save_fpu(tsk);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
}
return fpvalid;
#else
return 0; /* Task didn't use the fpu at all. */
#endif
}
EXPORT_SYMBOL(dump_fpu);
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
{
struct pt_regs *childregs;
#ifdef CONFIG_SH_FPU
/* can't happen for a kernel thread */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
current_pt_regs()->sr |= SR_FD;
}
#endif
/* Copy from sh version */
childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
p->thread.sp = (unsigned long) childregs;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->regs[2] = (unsigned long)arg;
childregs->regs[3] = (unsigned long)usp;
childregs->sr = (1 << 30); /* not user_mode */
childregs->sr |= SR_FD; /* Invalidate FPU flag */
p->thread.pc = (unsigned long) ret_from_kernel_thread;
return 0;
}
*childregs = *current_pt_regs();
/*
* Sign extend the edited stack.
* Note that thread.pc and thread.pc will stay
* 32-bit wide and context switch must take care
* of NEFF sign extension.
*/
if (usp)
childregs->regs[15] = neff_sign_extend(usp);
p->thread.uregs = childregs;
childregs->regs[9] = 0; /* Set return value for child */
childregs->sr |= SR_FD; /* Invalidate FPU flag */
p->thread.pc = (unsigned long) ret_from_fork;
return 0;
}
#ifdef CONFIG_FRAME_POINTER
static int in_sh64_switch_to(unsigned long pc)
{
extern char __sh64_switch_to_end;
/* For a sleeping task, the PC is somewhere in the middle of the function,
so we don't have to worry about masking the LSB off */
return (pc >= (unsigned long) sh64_switch_to) &&
(pc < (unsigned long) &__sh64_switch_to_end);
}
#endif
unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* The same comment as on the Alpha applies here, too ...
*/
pc = thread_saved_pc(p);
#ifdef CONFIG_FRAME_POINTER
if (in_sh64_switch_to(pc)) {
unsigned long schedule_fp;
unsigned long sh64_switch_to_fp;
unsigned long schedule_caller_pc;
sh64_switch_to_fp = (long) p->thread.sp;
/* r14 is saved at offset 4 in the sh64_switch_to frame */
schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
/* and the caller of 'schedule' is (currently!) saved at offset 24
in the frame of schedule (from disasm) */
schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
return schedule_caller_pc;
}
#endif
return pc;
}
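The FPU handling throughout this file follows the lazy-ownership scheme spelled out in the exit_thread() comment above. A condensed sketch of that convention, not literal kernel code: every non-owner runs with SR.FD set, so its first FPU instruction raises an FPUDIS trap, and the handler migrates ownership roughly like this:

static void fpudis_trap(struct task_struct *next)
{
	enable_fpu();
	if (last_task_used_math)
		save_fpu(last_task_used_math);	/* spill the old owner's registers */
	restore_fpu(next);			/* load the faulting task's state */
	last_task_used_math = next;		/* ownership moves to the new task */
}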


@@ -1,576 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/ptrace_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2008 Paul Mundt
*
* Started from SH3/4 version:
* SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*
* Original x86 implementation:
* By Ross Biro 1/23/92
* edited by Linus Torvalds
*/
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/* This mask defines the bits of the SR which the user is not allowed to
change, which are everything except S, Q, M, PR, SZ, FR. */
#define SR_MASK (0xffff8cfd)
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
*/
/*
* This routine will get a word from the user area in the process kernel stack.
*/
static inline int get_stack_long(struct task_struct *task, int offset)
{
unsigned char *stack;
stack = (unsigned char *)(task->thread.uregs);
stack += offset;
return (*((int *)stack));
}
static inline unsigned long
get_fpu_long(struct task_struct *task, unsigned long addr)
{
unsigned long tmp;
struct pt_regs *regs;
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
if (addr == offsetof(struct user_fpu_struct, fpscr)) {
tmp = FPSCR_INIT;
} else {
tmp = 0xffffffffUL; /* matches initial value in fpu.c */
}
return tmp;
}
if (last_task_used_math == task) {
enable_fpu();
save_fpu(task);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
return tmp;
}
/*
* This routine will put a word into the user area in the process kernel stack.
*/
static inline int put_stack_long(struct task_struct *task, int offset,
unsigned long data)
{
unsigned char *stack;
stack = (unsigned char *)(task->thread.uregs);
stack += offset;
*(unsigned long *) stack = data;
return 0;
}
static inline int
put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
{
struct pt_regs *regs;
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
init_fpu(task);
} else if (last_task_used_math == task) {
enable_fpu();
save_fpu(task);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
return 0;
}
void user_enable_single_step(struct task_struct *child)
{
struct pt_regs *regs = child->thread.uregs;
regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
{
struct pt_regs *regs = child->thread.uregs;
regs->sr &= ~SR_SSTEP;
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = task_pt_regs(target);
int ret;
/* PC, SR, SYSCALL */
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&regs->pc,
0, 3 * sizeof(unsigned long long));
/* R1 -> R63 */
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->regs,
offsetof(struct pt_regs, regs[0]),
63 * sizeof(unsigned long long));
/* TR0 -> TR7 */
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->tregs,
offsetof(struct pt_regs, tregs[0]),
8 * sizeof(unsigned long long));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
/* PC, SR, SYSCALL */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->pc,
0, 3 * sizeof(unsigned long long));
/* R1 -> R63 */
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->regs,
offsetof(struct pt_regs, regs[0]),
63 * sizeof(unsigned long long));
/* TR0 -> TR7 */
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->tregs,
offsetof(struct pt_regs, tregs[0]),
8 * sizeof(unsigned long long));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
set_stopped_child_used_math(target);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
const struct user_regset *regset)
{
return tsk_used_math(target) ? regset->n : 0;
}
#endif
const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(sr),
REG_OFFSET_NAME(syscall_nr),
REGS_OFFSET_NAME(0),
REGS_OFFSET_NAME(1),
REGS_OFFSET_NAME(2),
REGS_OFFSET_NAME(3),
REGS_OFFSET_NAME(4),
REGS_OFFSET_NAME(5),
REGS_OFFSET_NAME(6),
REGS_OFFSET_NAME(7),
REGS_OFFSET_NAME(8),
REGS_OFFSET_NAME(9),
REGS_OFFSET_NAME(10),
REGS_OFFSET_NAME(11),
REGS_OFFSET_NAME(12),
REGS_OFFSET_NAME(13),
REGS_OFFSET_NAME(14),
REGS_OFFSET_NAME(15),
REGS_OFFSET_NAME(16),
REGS_OFFSET_NAME(17),
REGS_OFFSET_NAME(18),
REGS_OFFSET_NAME(19),
REGS_OFFSET_NAME(20),
REGS_OFFSET_NAME(21),
REGS_OFFSET_NAME(22),
REGS_OFFSET_NAME(23),
REGS_OFFSET_NAME(24),
REGS_OFFSET_NAME(25),
REGS_OFFSET_NAME(26),
REGS_OFFSET_NAME(27),
REGS_OFFSET_NAME(28),
REGS_OFFSET_NAME(29),
REGS_OFFSET_NAME(30),
REGS_OFFSET_NAME(31),
REGS_OFFSET_NAME(32),
REGS_OFFSET_NAME(33),
REGS_OFFSET_NAME(34),
REGS_OFFSET_NAME(35),
REGS_OFFSET_NAME(36),
REGS_OFFSET_NAME(37),
REGS_OFFSET_NAME(38),
REGS_OFFSET_NAME(39),
REGS_OFFSET_NAME(40),
REGS_OFFSET_NAME(41),
REGS_OFFSET_NAME(42),
REGS_OFFSET_NAME(43),
REGS_OFFSET_NAME(44),
REGS_OFFSET_NAME(45),
REGS_OFFSET_NAME(46),
REGS_OFFSET_NAME(47),
REGS_OFFSET_NAME(48),
REGS_OFFSET_NAME(49),
REGS_OFFSET_NAME(50),
REGS_OFFSET_NAME(51),
REGS_OFFSET_NAME(52),
REGS_OFFSET_NAME(53),
REGS_OFFSET_NAME(54),
REGS_OFFSET_NAME(55),
REGS_OFFSET_NAME(56),
REGS_OFFSET_NAME(57),
REGS_OFFSET_NAME(58),
REGS_OFFSET_NAME(59),
REGS_OFFSET_NAME(60),
REGS_OFFSET_NAME(61),
REGS_OFFSET_NAME(62),
REGS_OFFSET_NAME(63),
TREGS_OFFSET_NAME(0),
TREGS_OFFSET_NAME(1),
TREGS_OFFSET_NAME(2),
TREGS_OFFSET_NAME(3),
TREGS_OFFSET_NAME(4),
TREGS_OFFSET_NAME(5),
TREGS_OFFSET_NAME(6),
TREGS_OFFSET_NAME(7),
REG_OFFSET_END,
};
/*
* These are our native regset flavours.
*/
enum sh_regset {
REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
REGSET_FPU,
#endif
};
static const struct user_regset sh_regsets[] = {
/*
* Format is:
* PC, SR, SYSCALL,
* R1 --> R63,
* TR0 --> TR7,
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(long long),
.align = sizeof(long long),
.get = genregs_get,
.set = genregs_set,
},
#ifdef CONFIG_SH_FPU
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fpu_struct) /
sizeof(long long),
.size = sizeof(long long),
.align = sizeof(long long),
.get = fpregs_get,
.set = fpregs_set,
.active = fpregs_active,
},
#endif
};
static const struct user_regset_view user_sh64_native_view = {
.name = "sh64",
.e_machine = EM_SH,
.regsets = sh_regsets,
.n = ARRAY_SIZE(sh_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_sh64_native_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || addr < 0)
break;
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
tmp = get_fpu_long(child, index);
} else if (addr == offsetof(struct user, u_fpvalid)) {
tmp = !!tsk_used_math(child);
} else {
break;
}
ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area. We must
disallow any changes to certain SR bits or u_fpvalid, since
this could crash the kernel or result in a security
loophole. */
ret = -EIO;
if ((addr & 3) || addr < 0)
break;
if (addr < sizeof(struct pt_regs)) {
/* Ignore change of top 32 bits of SR */
if (addr == offsetof (struct pt_regs, sr)+4)
{
ret = 0;
break;
}
/* If lower 32 bits of SR, ignore non-user bits */
if (addr == offsetof (struct pt_regs, sr))
{
long cursr = get_stack_long(child, addr);
data &= ~(SR_MASK);
data |= (cursr & SR_MASK);
}
ret = put_stack_long(child, addr, data);
}
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
ret = put_fpu_long(child, index, data);
}
break;
case PTRACE_GETREGS:
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage int sh64_ptrace(long request, long pid,
unsigned long addr, unsigned long data)
{
#define WPC_DBRMODE 0x0d104008
static unsigned long first_call;
if (!test_and_set_bit(0, &first_call)) {
/* Set WPC.DBRMODE to 0. This makes all debug events get
* delivered through RESVEC, i.e. into the handlers in entry.S.
* (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
* would normally be left set to 1, which makes debug events get
* delivered through DBRVEC, i.e. into the remote gdb's
* handlers. This prevents ptrace getting them, and confuses
* the remote gdb.) */
printk("DBRMODE set to 0 to permit native debugging\n");
poke_real_address_q(WPC_DBRMODE, 0);
}
return sys_ptrace(request, pid, addr, data);
}
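/*
 * Editor's sketch (not part of the original file): the test_and_set_bit()
 * above is the usual lock-free one-shot pattern. The first caller
 * atomically flips bit 0 and performs the setup; every later (or racing)
 * caller sees the bit already set and skips it.
 */
static unsigned long example_once;
static void run_once_example(void (*setup)(void))
{
if (!test_and_set_bit(0, &example_once))
setup(); /* executed at most once, even with concurrent callers */
}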
asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
{
long long ret = 0;
secure_computing_strict(regs->regs[9]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->regs[0].
*/
ret = -1LL;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[9]);
audit_syscall_entry(regs->regs[1], regs->regs[2], regs->regs[3],
regs->regs[4], regs->regs[5]);
return ret ?: regs->regs[9];
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[9]);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
/* Called with interrupts disabled */
asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
{
/* This is called after a single step exception (DEBUGSS).
There is no need to change the PC, as it is a post-execution
exception, as entry.S does not do anything to the PC for DEBUGSS.
We need to clear the Single Step setting in SR to avoid
continually stepping. */
local_irq_enable();
regs->sr &= ~SR_SSTEP;
force_sig(SIGTRAP);
}
/* Called with interrupts disabled */
BUILD_TRAP_HANDLER(breakpoint)
{
TRAP_HANDLER_DECL;
/* We need to forward step the PC, to counteract the backstep done
in signal.c. */
local_irq_enable();
force_sig(SIGTRAP);
regs->pc += 4;
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}

View file

@@ -4,9 +4,7 @@
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/module.h>
#ifdef CONFIG_SUPERH32
#include <asm/watchdog.h>
#endif
#include <asm/addrspace.h>
#include <asm/reboot.h>
#include <asm/tlbflush.h>
@@ -15,13 +13,11 @@
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
#ifdef CONFIG_SUPERH32
static void watchdog_trigger_immediate(void)
{
sh_wdt_write_cnt(0xFF);
sh_wdt_write_csr(0xC2);
}
#endif
static void native_machine_restart(char * __unused)
{
@@ -33,10 +29,8 @@ static void native_machine_restart(char * __unused)
/* Address error with SR.BL=1 first. */
trigger_address_error();
#ifdef CONFIG_SUPERH32
/* If that fails or is unsupported, go for the watchdog next. */
watchdog_trigger_immediate();
#endif
/*
* Give up and sleep.

View file

@@ -1,51 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/sh_ksyms_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*/
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/screen_info.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
EXPORT_SYMBOL(__put_user_asm_b);
EXPORT_SYMBOL(__put_user_asm_w);
EXPORT_SYMBOL(__put_user_asm_l);
EXPORT_SYMBOL(__put_user_asm_q);
EXPORT_SYMBOL(__get_user_asm_b);
EXPORT_SYMBOL(__get_user_asm_w);
EXPORT_SYMBOL(__get_user_asm_l);
EXPORT_SYMBOL(__get_user_asm_q);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcpy);
/* Ugh. These come in from libgcc.a at link time. */
#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
DECLARE_EXPORT(__sdivsi3);
DECLARE_EXPORT(__sdivsi3_1);
DECLARE_EXPORT(__sdivsi3_2);
DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__div_table);

View file

@@ -1,567 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/signal_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2008 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#define REG_RET 9
#define REG_ARG1 2
#define REG_ARG2 3
#define REG_ARG3 4
#define REG_SP 15
#define REG_PR 18
#define REF_REG_RET regs->regs[REG_RET]
#define REF_REG_SP regs->regs[REG_SP]
#define DEREF_REG_PR regs->regs[REG_PR]
#define DEBUG_SIG 0
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs);
static inline void
handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
{
/* If we're not from a syscall, bail out */
if (regs->syscall_nr < 0)
return;
/* check for system call restart.. */
switch (regs->regs[REG_RET]) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
no_system_call_restart:
regs->regs[REG_RET] = -EINTR;
break;
case -ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
/* fallthrough */
case -ERESTARTNOINTR:
/* Decode syscall # */
regs->regs[REG_RET] = regs->syscall_nr;
regs->pc -= 4;
break;
}
}
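/*
 * Editor's sketch (not part of the original file): why "regs->pc -= 4"
 * restarts the call. The trapa that entered the kernel is a 4-byte
 * instruction, so rewinding the PC by one instruction and putting the
 * saved syscall number back into the return register makes the CPU issue
 * the identical system call again once the handler returns.
 */
static inline void restart_current_syscall_example(struct pt_regs *regs)
{
regs->regs[REG_RET] = regs->syscall_nr; /* undo the -ERESTART* value */
regs->pc -= 4; /* point back at the trapa itself */
}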
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*
* Note that we go through the signals twice: once to check the signals that
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
static void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
/*
* We want the common case to go fast, which
* is why we may in certain cases get here from
* kernel mode. Just return without doing anything
* if so.
*/
if (!user_mode(regs))
return;
if (get_signal(&ksig)) {
handle_syscall_restart(regs, &ksig.ka.sa);
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
return;
}
/* Did we come from a system call? */
if (regs->syscall_nr >= 0) {
/* Restart the system call - no handlers present */
switch (regs->regs[REG_RET]) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
/* Decode Syscall # */
regs->regs[REG_RET] = regs->syscall_nr;
regs->pc -= 4;
break;
case -ERESTART_RESTARTBLOCK:
regs->regs[REG_RET] = __NR_restart_syscall;
regs->pc -= 4;
break;
}
}
/* No signal to deliver -- put the saved sigmask back */
restore_saved_sigmask();
}
/*
* Do a signal return; undo the signal stack.
*/
struct sigframe {
struct sigcontext sc;
unsigned long extramask[_NSIG_WORDS-1];
long long retcode[2];
};
struct rt_sigframe {
struct siginfo __user *pinfo;
void *puc;
struct siginfo info;
struct ucontext uc;
long long retcode[2];
};
#ifdef CONFIG_SH_FPU
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int fpvalid;
err |= __get_user (fpvalid, &sc->sc_fpvalid);
conditional_used_math(fpvalid);
if (! fpvalid)
return err;
if (current == last_task_used_math) {
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
(sizeof(long long) * 32) + (sizeof(int) * 1));
return err;
}
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int fpvalid;
fpvalid = !!used_math();
err |= __put_user(fpvalid, &sc->sc_fpvalid);
if (! fpvalid)
return err;
if (current == last_task_used_math) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
(sizeof(long long) * 32) + (sizeof(int) * 1));
clear_used_math();
return err;
}
#else
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
return 0;
}
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
return 0;
}
#endif
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
{
unsigned int err = 0;
unsigned long long current_sr, new_sr;
#define SR_MASK 0xffff8cfd
#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
/* Prevent the signal handler manipulating SR in a way that can
crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
modified */
current_sr = regs->sr;
err |= __get_user(new_sr, &sc->sc_sr);
regs->sr &= SR_MASK;
regs->sr |= (new_sr & ~SR_MASK);
COPY(pc);
#undef COPY
/* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
* has been restored above.) */
err |= restore_sigcontext_fpu(regs, sc);
regs->syscall_nr = -1; /* disable syscall checks */
err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
return err;
}
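/*
 * Editor's sketch (not part of the original file): the SR merge above in
 * isolation. Privileged bits come from the live SR; only the bits outside
 * SR_MASK (S, Q, M, PR, SZ, FR) are taken from the saved context. E.g.
 * cur_sr == 0x40000000 and new_sr == 0xffffffff yield 0x40007302, since
 * ~SR_MASK == 0x00007302.
 */
static inline unsigned long long
sr_merge_example(unsigned long long cur_sr, unsigned long long new_sr)
{
return (cur_sr & SR_MASK) | (new_sr & ~SR_MASK);
}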
asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs * regs)
{
struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
sigset_t set;
long long ret;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_NSIG_WORDS > 1
&& __copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->sc, &ret))
goto badframe;
regs->pc -= 4;
return (int) ret;
badframe:
force_sig(SIGSEGV);
return 0;
}
asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs * regs)
{
struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
sigset_t set;
long long ret;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
goto badframe;
regs->pc -= 4;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return (int) ret;
badframe:
force_sig(SIGSEGV);
return 0;
}
/*
* Set up a signal frame.
*/
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask)
{
int err = 0;
/* Do this first; otherwise, if this sets sr->fd, that value isn't preserved. */
err |= setup_sigcontext_fpu(regs, sc);
#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
COPY(sr); COPY(pc);
#undef COPY
err |= __put_user(mask, &sc->oldmask);
return err;
}
/*
* Determine which stack to use..
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & -8ul);
}
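/*
 * Editor's note (sketch, not part of the original file): "& -8ul" is the
 * round-down-to-8 trick; -8ul is all ones except the low three bits, so
 * e.g. (0x7fff1234 - 0x40) & -8ul == 0x7fff11f0.
 */
static inline unsigned long align_down_8_example(unsigned long sp)
{
return sp & -8ul; /* clears bits 2:0, never rounds upward */
}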
void sa_default_restorer(void); /* See comments below */
void sa_default_rt_restorer(void); /* See comments below */
static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
struct sigframe __user *frame;
int err = 0, sig = ksig->sig;
frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
return -EFAULT;
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
/* Give up early, as i386 does, in case of error */
if (err)
return -EFAULT;
if (_NSIG_WORDS > 1) {
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask)); }
/* Give up early, as i386 does, in case of error */
if (err)
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
/*
* On SH5 all edited pointers are subject to NEFF
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
ksig->ka.sa.sa_restorer | 0x1);
} else {
/*
* Different approach on SH5.
* . Endianness independent asm code gets placed in entry.S .
* This is limited to four ASM instructions corresponding
* to two long longs in size.
* . err checking is done on the else branch only
* . flush_icache_range() is called upon __put_user() only
* . all edited pointers are subject to NEFF
* . being code, the linker turns the ShMedia bit on, so always
* dereference with index -1.
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
frame->retcode | 0x01);
if (__copy_to_user(frame->retcode,
(void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
return -EFAULT;
/* Cohere the trampoline with the I-cache. */
flush_cache_sigtramp(DEREF_REG_PR-1);
}
/*
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
/* FIXME:
The glibc profiling support for SH-5 needs to be passed a sigcontext
so it can retrieve the PC. At some point during 2003 the glibc
support was changed to receive the sigcontext through the 2nd
argument, but there are still versions of libc.so in use that use
the 3rd argument. Until libc.so is stabilised, pass the sigcontext
through both 2nd and 3rd arguments.
*/
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
/* %016Lx is broken, so print the value in two 32-bit halves */
pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
sig, current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
return 0;
}
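/*
 * Editor's sketch (not part of the original file): the "| 0x1" and "& ~1"
 * pair above is pointer tagging. ShMedia code addresses carry bit 0 set as
 * a mode marker, so the trampoline address is tagged before being placed
 * in PR, and the tag is stripped again (the "-1" index) whenever the same
 * address is used for plain data access such as the cache flush.
 */
static inline unsigned long shmedia_tag_example(unsigned long code_addr)
{
return code_addr | 0x1UL; /* mark as ShMedia code */
}
static inline unsigned long shmedia_untag_example(unsigned long tagged)
{
return tagged & ~1UL; /* back to a plain byte address */
}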
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
return -EFAULT;
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
/* Give up early, as i386 does, in case of error */
if (err)
return -EFAULT;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __save_altstack(&frame->uc.uc_stack, regs->regs[REG_SP]);
err |= setup_sigcontext(&frame->uc.uc_mcontext,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Give up early, as i386 does, in case of error */
if (err)
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
/*
* On SH5 all edited pointers are subject to NEFF
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
ksig->ka.sa.sa_restorer | 0x1);
} else {
/*
* Different approach on SH5.
* . Endianness independent asm code gets placed in entry.S .
* This is limited to four ASM instructions corresponding
* to two long longs in size.
* . err checking is done on the else branch only
* . flush_icache_range() is called upon __put_user() only
* . all edited pointers are subject to NEFF
* . being code, the linker turns the ShMedia bit on, so always
* dereference with index -1.
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
frame->retcode | 0x01);
if (__copy_to_user(frame->retcode,
(void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
return -EFAULT;
/* Cohere the trampoline with the I-cache. */
flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
}
/*
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
sig, current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
return 0;
}
/*
* OK, we're invoking a handler
*/
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
/* Set up the stack frame */
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
ret = setup_rt_frame(ksig, oldset, regs);
else
ret = setup_frame(ksig, oldset, regs);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
}

View file

@@ -1,419 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/syscalls_64.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 - 2007 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*/
#include <linux/sys.h>
.section .data, "aw"
.balign 32
/*
* System calls jump table
*/
.globl sys_call_table
sys_call_table:
.long sys_restart_syscall /* 0 - old "setup()" system call */
.long sys_exit
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_lchown16
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
.long sh64_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid16
.long sys_getgid16
.long sys_signal
.long sys_geteuid16
.long sys_getegid16 /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall /* sys_olduname */
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_ni_syscall /* sys_oldselect */
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long sys_old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm */
.long sys_socketcall /* Obsolete implementation of socket syscall */
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_uname
.long sys_ni_syscall /* 110 */ /* iopl */
.long sys_vhangup
.long sys_ni_syscall /* idle */
.long sys_ni_syscall /* vm86old */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc /* Obsolete ipc syscall implementation */
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130: old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_ni_syscall /* vm86 */
.long sys_ni_syscall /* old "query_module" */
.long sys_poll
.long sys_ni_syscall /* was nfsservctl */
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
.long sys_chown16
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
.long sys_ni_syscall /* getpmsg */
.long sys_ni_syscall /* putpmsg */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_lchown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_chown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_mincore
.long sys_madvise
/* Broken-out socket family (maintain backwards compatibility in syscall
numbering with 2.4) */
.long sys_socket /* 220 */
.long sys_bind
.long sys_connect
.long sys_listen
.long sys_accept
.long sys_getsockname /* 225 */
.long sys_getpeername
.long sys_socketpair
.long sys_send
.long sys_sendto
.long sys_recv /* 230*/
.long sys_recvfrom
.long sys_shutdown
.long sys_setsockopt
.long sys_getsockopt
.long sys_sendmsg /* 235 */
.long sys_recvmsg
/* Broken-out IPC family (maintain backwards compatibility in syscall
numbering with 2.4) */
.long sys_semop
.long sys_semget
.long sys_semctl
.long sys_msgsnd /* 240 */
.long sys_msgrcv
.long sys_msgget
.long sys_msgctl
.long sys_shmat
.long sys_shmdt /* 245 */
.long sys_shmget
.long sys_shmctl
/* Rest of syscalls listed in 2.4 i386 unistd.h */
.long sys_getdents64
.long sys_fcntl64
.long sys_ni_syscall /* 250 reserved for TUX */
.long sys_ni_syscall /* Reserved for Security */
.long sys_gettid
.long sys_readahead
.long sys_setxattr
.long sys_lsetxattr /* 255 */
.long sys_fsetxattr
.long sys_getxattr
.long sys_lgetxattr
.long sys_fgetxattr
.long sys_listxattr /* 260 */
.long sys_llistxattr
.long sys_flistxattr
.long sys_removexattr
.long sys_lremovexattr
.long sys_fremovexattr /* 265 */
.long sys_tkill
.long sys_sendfile64
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity /* 270 */
.long sys_ni_syscall /* reserved for set_thread_area */
.long sys_ni_syscall /* reserved for get_thread_area */
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents /* 275 */
.long sys_io_submit
.long sys_io_cancel
.long sys_fadvise64
.long sys_ni_syscall
.long sys_exit_group /* 280 */
/* Rest of new 2.6 syscalls */
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl
.long sys_epoll_wait
.long sys_remap_file_pages /* 285 */
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime
.long sys_timer_gettime
.long sys_timer_getoverrun /* 290 */
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime
.long sys_clock_getres
.long sys_clock_nanosleep /* 295 */
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill
.long sys_utimes
.long sys_fadvise64_64 /* 300 */
.long sys_ni_syscall /* Reserved for vserver */
.long sys_ni_syscall /* Reserved for mbind */
.long sys_ni_syscall /* get_mempolicy */
.long sys_ni_syscall /* set_mempolicy */
.long sys_mq_open /* 305 */
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive
.long sys_mq_notify
.long sys_mq_getsetattr /* 310 */
.long sys_ni_syscall /* Reserved for kexec */
.long sys_waitid
.long sys_add_key
.long sys_request_key
.long sys_keyctl /* 315 */
.long sys_ioprio_set
.long sys_ioprio_get
.long sys_inotify_init
.long sys_inotify_add_watch
.long sys_inotify_rm_watch /* 320 */
.long sys_ni_syscall
.long sys_migrate_pages
.long sys_openat
.long sys_mkdirat
.long sys_mknodat /* 325 */
.long sys_fchownat
.long sys_futimesat
.long sys_fstatat64
.long sys_unlinkat
.long sys_renameat /* 330 */
.long sys_linkat
.long sys_symlinkat
.long sys_readlinkat
.long sys_fchmodat
.long sys_faccessat /* 335 */
.long sys_pselect6
.long sys_ppoll
.long sys_unshare
.long sys_set_robust_list
.long sys_get_robust_list /* 340 */
.long sys_splice
.long sys_sync_file_range
.long sys_tee
.long sys_vmsplice
.long sys_move_pages /* 345 */
.long sys_getcpu
.long sys_epoll_pwait
.long sys_utimensat
.long sys_signalfd
.long sys_timerfd_create /* 350 */
.long sys_eventfd
.long sys_fallocate
.long sys_timerfd_settime
.long sys_timerfd_gettime
.long sys_signalfd4 /* 355 */
.long sys_eventfd2
.long sys_epoll_create1
.long sys_dup3
.long sys_pipe2
.long sys_inotify_init1 /* 360 */
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
.long sys_recvmmsg /* 365 */
.long sys_accept4
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
.long sys_name_to_handle_at /* 370 */
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
.long sys_setns /* 375 */
.long sys_process_vm_readv
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
.long sys_sched_getattr /* 380 */
.long sys_sched_setattr
.long sys_renameat2
.long sys_seccomp
.long sys_getrandom
.long sys_memfd_create /* 385 */
.long sys_bpf
.long sys_execveat
.long sys_userfaultfd
.long sys_membarrier
.long sys_mlock2 /* 390 */
.long sys_copy_file_range
.long sys_preadv2
.long sys_pwritev2

View file

@@ -1,814 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/traps_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/alignment.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>
static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
{
int get_user_error;
unsigned long aligned_pc;
insn_size_t opcode;
if ((pc & 3) == 1) {
/* SHmedia */
aligned_pc = pc & ~3;
if (from_user_mode) {
if (!access_ok(aligned_pc, sizeof(insn_size_t))) {
get_user_error = -EFAULT;
} else {
get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
*result_opcode = opcode;
}
return get_user_error;
} else {
/* If the fault was in the kernel, we can either read
* this directly, or if not, we fault.
*/
*result_opcode = *(insn_size_t *)aligned_pc;
return 0;
}
} else if ((pc & 1) == 0) {
/* SHcompact */
/* TODO : provide handling for this. We don't really support
user-mode SHcompact yet, and for a kernel fault, this would
have to come from a module built for SHcompact. */
return -EFAULT;
} else {
/* misaligned */
return -EFAULT;
}
}
static int address_is_sign_extended(__u64 a)
{
__u64 b;
#if (NEFF == 32)
b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
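/*
 * Editor's addition (sketch, not in the original file): two worked inputs
 * for the NEFF == 32 check above. 0x000000007fffffffULL sign-extends from
 * its low 32 bits back to itself and passes; 0x0000000100000000ULL does
 * not (its low 32 bits extend to 0), so it is rejected.
 */
static int address_check_example(void)
{
return address_is_sign_extended(0x000000007fffffffULL) == 1 &&
address_is_sign_extended(0x0000000100000000ULL) == 0;
}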
/* return -1 for fault, 0 for OK */
static int generate_and_check_address(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
__u64 *address)
{
__u64 base_address, addr;
int basereg;
switch (1 << width_shift) {
case 1: inc_unaligned_byte_access(); break;
case 2: inc_unaligned_word_access(); break;
case 4: inc_unaligned_dword_access(); break;
case 8: inc_unaligned_multi_access(); break;
}
basereg = (opcode >> 20) & 0x3f;
base_address = regs->regs[basereg];
if (displacement_not_indexed) {
__s64 displacement;
displacement = (opcode >> 10) & 0x3ff;
displacement = sign_extend64(displacement, 9);
addr = (__u64)((__s64)base_address + (displacement << width_shift));
} else {
__u64 offset;
int offsetreg;
offsetreg = (opcode >> 10) & 0x3f;
offset = regs->regs[offsetreg];
addr = base_address + offset;
}
/* Check sign extended */
if (!address_is_sign_extended(addr))
return -1;
/* Check accessible. For misaligned access in the kernel, assume the
address is always accessible (and if not, just fault when the
load/store gets done.) */
if (user_mode(regs)) {
inc_unaligned_user_access();
if (addr >= TASK_SIZE)
return -1;
} else
inc_unaligned_kernel_access();
*address = addr;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
unaligned_fixups_notify(current, opcode, regs);
return 0;
}
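/*
 * Editor's sketch (not part of the original file): the displacement form in
 * isolation. The 10-bit field at opcode bits [19:10] is sign-extended and
 * scaled by the access width; e.g. field == 0x3ff with width_shift == 2
 * gives sign_extend64(0x3ff, 9) == -1 and -1 << 2 == -4, an access four
 * bytes below the base register.
 */
static inline __s64 scaled_displacement_example(insn_size_t opcode, int width_shift)
{
return sign_extend64((opcode >> 10) & 0x3ff, 9) << width_shift;
}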
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
q[0] = p[0];
q[1] = p[1];
if (do_sign_extend) {
*result = (__u64)(__s64) *(short *) &x;
} else {
*result = (__u64) x;
}
}
static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
x = (__u16) value;
p[0] = q[0];
p[1] = q[1];
}
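/*
 * Editor's note (sketch, not in the original file): the two helpers above
 * dodge the hardware alignment requirement by bouncing through an aligned
 * local, one byte at a time. memcpy() expresses the same technique:
 */
static inline unsigned short misaligned_read_w_example(const void *p)
{
unsigned short x;
memcpy(&x, p, sizeof(x)); /* byte-wise copy, no alignment assumed */
return x;
}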
static int misaligned_load(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_sign_extend)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
switch (width_shift) {
case 1:
if (do_sign_extend) {
regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
} else {
regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
}
break;
case 2:
regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
break;
case 3:
regs->regs[destreg] = buffer;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
} else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 lo, hi;
switch (width_shift) {
case 1:
misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
break;
case 2:
asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
case 3:
asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
static int misaligned_store(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
switch (width_shift) {
case 1:
*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
break;
case 2:
*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
break;
case 3:
buffer = regs->regs[srcreg];
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
} else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 val = regs->regs[srcreg];
switch (width_shift) {
case 1:
misaligned_kernel_word_store(address, val);
break;
case 2:
asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
break;
case 3:
asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
error. */
static int misaligned_fpu_load(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
__u32 buflo, bufhi;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
buflo = *(__u32*) &buffer;
bufhi = *(1 + (__u32*) &buffer);
switch (width_shift) {
case 2:
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
break;
case 3:
if (do_paired_load) {
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
static int misaligned_fpu_store(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
/* Initialise these to NaNs. */
__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
switch (width_shift) {
case 2:
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
break;
case 3:
if (do_paired_load) {
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
*(__u32*) &buffer = buflo;
*(1 + (__u32*) &buffer) = bufhi;
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
return 0;
} else {
die ("Misaligned FPU store inside kernel", regs, 0);
return -1;
}
}
static int misaligned_fixup(struct pt_regs *regs)
{
insn_size_t opcode;
int error;
int major, minor;
unsigned int user_action;
user_action = unaligned_user_action();
if (!(user_action & UM_FIXUP))
return -1;
error = read_opcode(regs->pc, &opcode, user_mode(regs));
if (error < 0) {
return error;
}
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
switch (major) {
case (0x84>>2): /* LD.W */
error = misaligned_load(regs, opcode, 1, 1, 1);
break;
case (0xb0>>2): /* LD.UW */
error = misaligned_load(regs, opcode, 1, 1, 0);
break;
case (0x88>>2): /* LD.L */
error = misaligned_load(regs, opcode, 1, 2, 1);
break;
case (0x8c>>2): /* LD.Q */
error = misaligned_load(regs, opcode, 1, 3, 0);
break;
case (0xa4>>2): /* ST.W */
error = misaligned_store(regs, opcode, 1, 1);
break;
case (0xa8>>2): /* ST.L */
error = misaligned_store(regs, opcode, 1, 2);
break;
case (0xac>>2): /* ST.Q */
error = misaligned_store(regs, opcode, 1, 3);
break;
case (0x40>>2): /* indexed loads */
switch (minor) {
case 0x1: /* LDX.W */
error = misaligned_load(regs, opcode, 0, 1, 1);
break;
case 0x5: /* LDX.UW */
error = misaligned_load(regs, opcode, 0, 1, 0);
break;
case 0x2: /* LDX.L */
error = misaligned_load(regs, opcode, 0, 2, 1);
break;
case 0x3: /* LDX.Q */
error = misaligned_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0x60>>2): /* indexed stores */
switch (minor) {
case 0x1: /* STX.W */
error = misaligned_store(regs, opcode, 0, 1);
break;
case 0x2: /* STX.L */
error = misaligned_store(regs, opcode, 0, 2);
break;
case 0x3: /* STX.Q */
error = misaligned_store(regs, opcode, 0, 3);
break;
default:
error = -1;
break;
}
break;
case (0x94>>2): /* FLD.S */
error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
break;
case (0x98>>2): /* FLD.P */
error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
break;
case (0x9c>>2): /* FLD.D */
error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
break;
case (0x1c>>2): /* floating indexed loads */
switch (minor) {
case 0x8: /* FLDX.S */
error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FLDX.P */
error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FLDX.D */
error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0xb4>>2): /* FST.S */
error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
break;
case (0xb8>>2): /* FST.P */
error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
break;
case (0xbc>>2): /* FST.D */
error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
break;
case (0x3c>>2): /* floating indexed stores */
switch (minor) {
case 0x8: /* FSTX.S */
error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FSTX.P */
error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FSTX.D */
error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
default:
/* Fault */
error = -1;
break;
}
if (error < 0) {
return error;
} else {
regs->pc += 4; /* Skip the instruction that's just been emulated */
return 0;
}
}
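/*
 * Editor's sketch (not part of the original file): the field extraction
 * used by the dispatcher above. The case labels are the architecture
 * manual's opcode bytes shifted down, e.g. LD.L is 0x88, so its major
 * opcode is 0x88 >> 2 == 0x22.
 */
static inline void opcode_fields_example(insn_size_t opcode, int *major, int *minor)
{
*major = (opcode >> 26) & 0x3f; /* bits 31:26 */
*minor = (opcode >> 16) & 0xf; /* bits 19:16 */
}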
static void do_unhandled_exception(int signr, char *str, unsigned long error,
struct pt_regs *regs)
{
if (user_mode(regs))
force_sig(signr);
die_if_no_fixup(str, regs, error);
}
#define DO_ERROR(signr, str, name) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
do_unhandled_exception(signr, str, error_code, regs); \
}
DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)
#if defined(CONFIG_SH64_ID2815_WORKAROUND)
#define OPCODE_INVALID 0
#define OPCODE_USER_VALID 1
#define OPCODE_PRIV_VALID 2
/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG 3
/* Table of valid opcodes for SHmedia mode.
Form a 10-bit value by concatenating the major/minor opcodes i.e.
opcode[31:26,20:16]. The 6 MSBs of this value index into the following
array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
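/*
 * Editor's sketch (not part of the original file): reading one entry of the
 * table above. Each 32-bit word holds sixteen 2-bit states indexed by the
 * minor opcode, so rows of 0x55555555 mark every minor opcode as
 * OPCODE_USER_VALID (every bit-pair is 01).
 */
static inline int shmedia_opcode_state_example(unsigned long major, unsigned long minor)
{
return (shmedia_opcode_table[major] >> (minor << 1)) & 0x3;
}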
/* Workaround SH5-101 cut2 silicon defect #2815 :
in some situations, inter-mode branches from SHcompact -> SHmedia
which should take ITLBMISS or EXECPROT exceptions at the target
falsely take RESINST at the target instead. */
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
unsigned long pc, aligned_pc;
unsigned long index, shift;
unsigned long major, minor, combined;
unsigned long reserved_field;
int opcode_state;
int get_user_error;
int signr = SIGILL;
char *exception_name = "reserved_instruction";
pc = regs->pc;
/* SHcompact is not handled */
if (unlikely((pc & 3) == 0))
goto out;
/* SHmedia : check for defect. This requires executable vmas
to be readable too. */
aligned_pc = pc & ~3;
if (!access_ok(aligned_pc, sizeof(insn_size_t)))
get_user_error = -EFAULT;
else
get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
if (get_user_error < 0) {
/*
* Error trying to read opcode. This typically means a
* real fault, not a RESINST any more. So change the
* codes.
*/
exception_name = "address error (exec)";
signr = SIGSEGV;
goto out;
}
/* These bits are currently reserved as zero in all valid opcodes */
reserved_field = opcode & 0xf;
if (unlikely(reserved_field))
goto out; /* invalid opcode */
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
combined = (major << 4) | minor;
index = major;
shift = minor << 1;
opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
switch (opcode_state) {
case OPCODE_INVALID:
/* Trap. */
break;
case OPCODE_USER_VALID:
/*
* Restart the instruction: the branch to the instruction
* will now be from an RTE not from SHcompact so the
* silicon defect won't be triggered.
*/
return;
case OPCODE_PRIV_VALID:
if (!user_mode(regs)) {
/*
* Should only ever get here if a module has
* SHcompact code inside it. If so, the same fix
* up is needed.
*/
return; /* same reason */
}
/*
* Otherwise, user mode trying to execute a privileged
* instruction - fall through to trap.
*/
break;
case OPCODE_CTRL_REG:
/* If in privileged mode, return as above. */
if (!user_mode(regs))
return;
/* In user mode ... */
if (combined == 0x9f) { /* GETCON */
unsigned long regno = (opcode >> 20) & 0x3f;
if (regno >= 62)
return;
/* reserved/privileged control register => trap */
} else if (combined == 0x1bf) { /* PUTCON */
unsigned long regno = (opcode >> 4) & 0x3f;
if (regno >= 62)
return;
/* reserved/privileged control register => trap */
}
break;
default:
/* Fall through to trap. */
break;
}
out:
do_unhandled_exception(signr, exception_name, error_code, regs);
}
#else /* CONFIG_SH64_ID2815_WORKAROUND */
/* If the workaround isn't needed, this is just a straightforward reserved
instruction */
DO_ERROR(SIGILL, "reserved instruction", reserved_inst)
#endif /* CONFIG_SH64_ID2815_WORKAROUND */
/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
die_if_kernel("exception", regs, ex);
}
asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
/* Syscall debug */
printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
die_if_kernel("unknown trapa", regs, scId);
return -ENOSYS;
}
/* Implement misaligned load/store handling for kernel (and optionally for user
mode too). Limitation : only SHmedia mode code is handled - there is no
handling at all for misaligned accesses occurring in SHcompact code yet. */
asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0)
do_unhandled_exception(SIGSEGV, "address error(load)",
error_code, regs);
}
asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0)
do_unhandled_exception(SIGSEGV, "address error(store)",
error_code, regs);
}
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
u64 peek_real_address_q(u64 addr);
u64 poke_real_address_q(u64 addr, u64 val);
unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
unsigned long long exp_cause;
/* It's not worth ioremapping the debug module registers for the amount
of access we make to them - just go direct to their physical
addresses. */
exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
if (exp_cause & ~4)
printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
(unsigned long)(exp_cause & 0xffffffff));
show_state();
/* Clear all DEBUGINT causes */
poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
void per_cpu_trap_init(void)
{
/* Nothing to do for now, VBR initialization later. */
}

View file

@@ -3,14 +3,7 @@
* ld script to make SuperH Linux kernel
* Written by Niibe Yutaka and Paul Mundt
*/
#ifdef CONFIG_SUPERH64
#define LOAD_OFFSET PAGE_OFFSET
OUTPUT_ARCH(sh:sh5)
#else
#define LOAD_OFFSET 0
OUTPUT_ARCH(sh)
#endif
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
@@ -28,14 +21,13 @@ SECTIONS
_text = .; /* Text and read-only data */
.empty_zero_page : AT(ADDR(.empty_zero_page) - LOAD_OFFSET) {
.empty_zero_page : AT(ADDR(.empty_zero_page)) {
*(.empty_zero_page)
} = 0
.text : AT(ADDR(.text) - LOAD_OFFSET) {
.text : AT(ADDR(.text)) {
HEAD_TEXT
TEXT_TEXT
EXTRA_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
@@ -62,7 +54,7 @@ SECTIONS
INIT_DATA_SECTION(16)
. = ALIGN(4);
.machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
.machvec.init : AT(ADDR(.machvec.init)) {
__machvec_start = .;
*(.machvec.init)
__machvec_end = .;
@@ -74,8 +66,8 @@ SECTIONS
* .exit.text is discarded at runtime, not link time, to deal with
* references from __bug_table
*/
.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT }
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
.exit.text : AT(ADDR(.exit.text)) { EXIT_TEXT }
.exit.data : AT(ADDR(.exit.data)) { EXIT_DATA }
. = ALIGN(PAGE_SIZE);
__init_end = .;