Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
Linus Torvalds
2005-04-16 15:20:36 -07:00
Current commit 1da177e4c3
17291 changed files with 6718755 additions and 0 deletions

36
arch/sh64/kernel/Makefile Normal file

@@ -0,0 +1,36 @@
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2000, 2001 Paolo Alberelli
# Copyright (C) 2003 Paul Mundt
#
# Makefile for the Linux sh64 kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o signal.o entry.o traps.o irq.o irq_intc.o \
ptrace.o setup.o time.o sys_sh64.o semaphore.o sh_ksyms.o \
switchto.o syscalls.o
obj-$(CONFIG_HEARTBEAT) += led.o
obj-$(CONFIG_SH_ALPHANUMERIC) += alphanum.o
obj-$(CONFIG_SH_DMA) += dma.o
obj-$(CONFIG_SH_FPU) += fpu.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KALLSYMS) += unwind.o
obj-$(CONFIG_PCI) += pci-dma.o pcibios.o
obj-$(CONFIG_MODULES) += module.o
ifeq ($(CONFIG_PCI),y)
obj-$(CONFIG_CPU_SH5) += pci_sh5.o
endif
USE_STANDARD_AS_RULE := true
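
A note on the kbuild idiom above: the config symbol inside obj-$(CONFIG_FOO) selects which list an object joins, so a single line covers the built-in, modular and disabled cases. For example, taking CONFIG_SH_DMA from this Makefile:

# CONFIG_SH_DMA=y     ->  obj-y += dma.o   (linked into vmlinux)
# CONFIG_SH_DMA=m     ->  obj-m += dma.o   (built as a module)
# CONFIG_SH_DMA unset ->  obj-  += dma.o   (ignored by kbuild)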

45
arch/sh64/kernel/alphanum.c Normal file

@@ -0,0 +1,45 @@
/*
* arch/sh64/kernel/alphanum.c
*
* Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com>
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Machine-independent functions for handling 8-digit alphanumeric display
* (e.g. Agilent HDSP-253x)
*/
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/sched.h>
void mach_alphanum(int pos, unsigned char val);
void mach_led(int pos, int val);
void print_seg(char *file, int line)
{
int i;
unsigned int nibble;
for (i = 0; i < 5; i++) {
mach_alphanum(i, file[i]);
}
for (i = 0; i < 3; i++) {
nibble = ((line >> (i * 4)) & 0xf);
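/* hex digit to ASCII: 48 == '0', 55 == 'A' - 10 */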
mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
}
}
void print_seg_num(unsigned num)
{
int i;
unsigned int nibble;
for (i = 0; i < 8; i++) {
nibble = ((num >> (i * 4)) & 0xf);
mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
}
}

33
arch/sh64/kernel/asm-offsets.c Normal file

@@ -0,0 +1,33 @@
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <asm/thread_info.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
int main(void)
{
/* offsets into the thread_info struct */
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_RESTART_BLOCK, offsetof(struct thread_info, restart_block));
return 0;
}
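
The extraction half of the technique described above lives in the build system rather than in this file: the kernel compiles asm-offsets.c to assembler with -S and pulls the "->" marker lines out of the output. A hedged sketch of the round trip, with hypothetical offset values:

/* The assembler output contains marker lines along the lines of
 *   ->TI_TASK 0 offsetof(struct thread_info, task)
 * which the build machinery rewrites into header entries like: */
#define TI_TASK 0    /* hypothetical offset of thread_info.task */
#define TI_FLAGS 16  /* hypothetical offset of thread_info.flags */
/* so assembly such as entry.S can use TI_FLAGS without knowing the C layout */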

297
arch/sh64/kernel/dma.c Normal file

@@ -0,0 +1,297 @@
/*
* arch/sh64/kernel/dma.c
*
* DMA routines for the SH-5 DMAC.
*
* Copyright (C) 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/dma.h>
#include <asm/signal.h>
#include <asm/errno.h>
#include <asm/io.h>
typedef struct {
unsigned long dev_addr;
unsigned long mem_addr;
unsigned int mode;
unsigned int count;
} dma_info_t;
static dma_info_t dma_info[MAX_DMA_CHANNELS];
static DEFINE_SPINLOCK(dma_spin_lock);
/* arch/sh64/kernel/irq_intc.c */
extern void make_intc_irq(unsigned int irq);
/* DMAC Interrupts */
#define DMA_IRQ_DMTE0 18
#define DMA_IRQ_DERR 22
#define DMAC_COMMON_BASE (dmac_base + 0x08)
#define DMAC_SAR_BASE (dmac_base + 0x10)
#define DMAC_DAR_BASE (dmac_base + 0x18)
#define DMAC_COUNT_BASE (dmac_base + 0x20)
#define DMAC_CTRL_BASE (dmac_base + 0x28)
#define DMAC_STATUS_BASE (dmac_base + 0x30)
#define DMAC_SAR(n) (DMAC_SAR_BASE + ((n) * 0x28))
#define DMAC_DAR(n) (DMAC_DAR_BASE + ((n) * 0x28))
#define DMAC_COUNT(n) (DMAC_COUNT_BASE + ((n) * 0x28))
#define DMAC_CTRL(n) (DMAC_CTRL_BASE + ((n) * 0x28))
#define DMAC_STATUS(n) (DMAC_STATUS_BASE + ((n) * 0x28))
/* DMAC.COMMON Bit Definitions */
#define DMAC_COMMON_PR 0x00000001 /* Priority */
/* Bits 1-2 Reserved */
#define DMAC_COMMON_ME 0x00000008 /* Master Enable */
#define DMAC_COMMON_NMI 0x00000010 /* NMI Flag */
/* Bits 5-6 Reserved */
#define DMAC_COMMON_ER 0x00000780 /* Error Response */
#define DMAC_COMMON_AAE 0x00007800 /* Address Alignment Error */
/* Bits 15-63 Reserved */
/* DMAC.SAR Bit Definitions */
#define DMAC_SAR_ADDR 0xffffffff /* Source Address */
/* DMAC.DAR Bit Definitions */
#define DMAC_DAR_ADDR 0xffffffff /* Destination Address */
/* DMAC.COUNT Bit Definitions */
#define DMAC_COUNT_CNT 0xffffffff /* Transfer Count */
/* DMAC.CTRL Bit Definitions */
#define DMAC_CTRL_TS 0x00000007 /* Transfer Size */
#define DMAC_CTRL_SI 0x00000018 /* Source Increment */
#define DMAC_CTRL_DI 0x00000060 /* Destination Increment */
#define DMAC_CTRL_RS 0x00000780 /* Resource Select */
#define DMAC_CTRL_IE 0x00000800 /* Interrupt Enable */
#define DMAC_CTRL_TE 0x00001000 /* Transfer Enable */
/* Bits 15-63 Reserved */
/* DMAC.STATUS Bit Definitions */
#define DMAC_STATUS_TE 0x00000001 /* Transfer End */
#define DMAC_STATUS_AAE 0x00000002 /* Address Alignment Error */
/* Bits 2-63 Reserved */
static unsigned long dmac_base;
void set_dma_count(unsigned int chan, unsigned int count);
void set_dma_addr(unsigned int chan, unsigned int addr);
static irqreturn_t dma_mte(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned int chan = irq - DMA_IRQ_DMTE0;
dma_info_t *info = dma_info + chan;
u64 status;
if (info->mode & DMA_MODE_WRITE) {
sh64_out64(info->mem_addr & DMAC_SAR_ADDR, DMAC_SAR(chan));
} else {
sh64_out64(info->mem_addr & DMAC_DAR_ADDR, DMAC_DAR(chan));
}
set_dma_count(chan, info->count);
/* Clear the TE bit */
status = sh64_in64(DMAC_STATUS(chan));
status &= ~DMAC_STATUS_TE;
sh64_out64(status, DMAC_STATUS(chan));
return IRQ_HANDLED;
}
static struct irqaction irq_dmte = {
.handler = dma_mte,
.flags = SA_INTERRUPT,
.name = "DMA MTE",
};
static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
{
u64 tmp;
u8 chan;
printk(KERN_NOTICE "DMAC: Got a DMA Error!\n");
tmp = sh64_in64(DMAC_COMMON_BASE);
/* Check for the type of error */
if ((chan = tmp & DMAC_COMMON_AAE)) {
/* It's an address alignment error.. */
printk(KERN_NOTICE "DMAC: Alignment error on channel %d, ", chan);
printk(KERN_NOTICE "SAR: 0x%08llx, DAR: 0x%08llx, COUNT: %lld\n",
(sh64_in64(DMAC_SAR(chan)) & DMAC_SAR_ADDR),
(sh64_in64(DMAC_DAR(chan)) & DMAC_DAR_ADDR),
(sh64_in64(DMAC_COUNT(chan)) & DMAC_COUNT_CNT));
} else if ((chan = tmp & DMAC_COMMON_ER)) {
/* Something else went wrong.. */
printk(KERN_NOTICE "DMAC: Error on channel %d\n", chan);
}
/* Reset the ME bit to clear the interrupt */
tmp |= DMAC_COMMON_ME;
sh64_out64(tmp, DMAC_COMMON_BASE);
return IRQ_HANDLED;
}
static struct irqaction irq_derr = {
.handler = dma_err,
.flags = SA_INTERRUPT,
.name = "DMA Error",
};
static inline unsigned long calc_xmit_shift(unsigned int chan)
{
return sh64_in64(DMAC_CTRL(chan)) & 0x03;
}
void setup_dma(unsigned int chan, dma_info_t *info)
{
unsigned int irq = DMA_IRQ_DMTE0 + chan;
dma_info_t *dma = dma_info + chan;
make_intc_irq(irq);
setup_irq(irq, &irq_dmte);
*dma = *info; /* copy the caller's settings; assigning the pointer would lose them */
}
void enable_dma(unsigned int chan)
{
u64 ctrl;
ctrl = sh64_in64(DMAC_CTRL(chan));
ctrl |= DMAC_CTRL_TE;
sh64_out64(ctrl, DMAC_CTRL(chan));
}
void disable_dma(unsigned int chan)
{
u64 ctrl;
ctrl = sh64_in64(DMAC_CTRL(chan));
ctrl &= ~DMAC_CTRL_TE;
sh64_out64(ctrl, DMAC_CTRL(chan));
}
void set_dma_mode(unsigned int chan, char mode)
{
dma_info_t *info = dma_info + chan;
info->mode = mode;
set_dma_addr(chan, info->mem_addr);
set_dma_count(chan, info->count);
}
void set_dma_addr(unsigned int chan, unsigned int addr)
{
dma_info_t *info = dma_info + chan;
unsigned long sar, dar;
info->mem_addr = addr;
sar = (info->mode & DMA_MODE_WRITE) ? info->mem_addr : info->dev_addr;
dar = (info->mode & DMA_MODE_WRITE) ? info->dev_addr : info->mem_addr;
sh64_out64(sar & DMAC_SAR_ADDR, DMAC_SAR(chan));
sh64_out64(dar & DMAC_DAR_ADDR, DMAC_DAR(chan));
}
void set_dma_count(unsigned int chan, unsigned int count)
{
dma_info_t *info = dma_info + chan;
u64 tmp;
info->count = count;
tmp = (info->count >> calc_xmit_shift(chan)) & DMAC_COUNT_CNT;
sh64_out64(tmp, DMAC_COUNT(chan));
}
unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
int get_dma_residue(unsigned int chan)
{
/* shift the value read back, not the register address */
return sh64_in64(DMAC_COUNT(chan)) << calc_xmit_shift(chan);
}
int __init init_dma(void)
{
struct vcr_info vcr;
u64 tmp;
/* Remap the DMAC */
dmac_base = onchip_remap(PHYS_DMAC_BLOCK, 1024, "DMAC");
if (!dmac_base) {
printk(KERN_ERR "Unable to remap DMAC\n");
return -ENOMEM;
}
/* Report DMAC.VCR Info */
vcr = sh64_get_vcr_info(dmac_base);
printk("DMAC: Module ID: 0x%04x, Module version: 0x%04x\n",
vcr.mod_id, vcr.mod_vers);
/* Set the ME bit */
tmp = sh64_in64(DMAC_COMMON_BASE);
tmp |= DMAC_COMMON_ME;
sh64_out64(tmp, DMAC_COMMON_BASE);
/* Enable the DMAC Error Interrupt */
make_intc_irq(DMA_IRQ_DERR);
setup_irq(DMA_IRQ_DERR, &irq_derr);
return 0;
}
static void __exit exit_dma(void)
{
onchip_unmap(dmac_base);
free_irq(DMA_IRQ_DERR, 0);
}
module_init(init_dma);
module_exit(exit_dma);
MODULE_AUTHOR("Paul Mundt");
MODULE_DESCRIPTION("DMA API for SH-5 DMAC");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(setup_dma);
EXPORT_SYMBOL(claim_dma_lock);
EXPORT_SYMBOL(release_dma_lock);
EXPORT_SYMBOL(enable_dma);
EXPORT_SYMBOL(disable_dma);
EXPORT_SYMBOL(set_dma_mode);
EXPORT_SYMBOL(set_dma_addr);
EXPORT_SYMBOL(set_dma_count);
EXPORT_SYMBOL(get_dma_residue);
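
Putting the exported calls together, a driver-side transfer might be set up roughly as below. This is a hedged sketch, not code from the commit: the channel number, device address and buffer address are hypothetical, and it assumes the dma_info_t layout above is visible to the caller.

/* Hypothetical caller of the DMA API exported above (sketch only). */
static int example_start_transfer(unsigned long buf_phys)
{
	unsigned long flags;
	dma_info_t info = {
		.dev_addr = 0xb0000000,	/* hypothetical device FIFO address */
		.mode = DMA_MODE_WRITE,
		.count = 4096,
	};
	setup_dma(2, &info);		/* hook the DMTE interrupt for channel 2 */
	flags = claim_dma_lock();
	set_dma_mode(2, DMA_MODE_WRITE);
	set_dma_addr(2, buf_phys);	/* programs SAR/DAR from mem_addr/dev_addr */
	set_dma_count(2, 4096);
	enable_dma(2);			/* sets DMAC_CTRL_TE to start the transfer */
	release_dma_lock(flags);
	return 0;
}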

105
arch/sh64/kernel/early_printk.c Normal file

@@ -0,0 +1,105 @@
/*
* arch/sh64/kernel/early_printk.c
*
* SH-5 Early SCIF console (cloned and hacked from sh implementation)
*
* Copyright (C) 2003, 2004 Paul Mundt <lethal@linux-sh.org>
* Copyright (C) 2002 M. R. Brown <mrbrown@0xd6.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/hardware.h>
#define SCIF_BASE_ADDR 0x01030000
#define SCIF_ADDR_SH5 (PHYS_PERIPHERAL_BLOCK + SCIF_BASE_ADDR)
/*
* Fixed virtual address where SCIF is mapped (should already be done
* in arch/sh64/kernel/head.S!).
*/
#define SCIF_REG 0xfa030000
enum {
SCIF_SCSMR2 = SCIF_REG + 0x00,
SCIF_SCBRR2 = SCIF_REG + 0x04,
SCIF_SCSCR2 = SCIF_REG + 0x08,
SCIF_SCFTDR2 = SCIF_REG + 0x0c,
SCIF_SCFSR2 = SCIF_REG + 0x10,
SCIF_SCFRDR2 = SCIF_REG + 0x14,
SCIF_SCFCR2 = SCIF_REG + 0x18,
SCIF_SCFDR2 = SCIF_REG + 0x1c,
SCIF_SCSPTR2 = SCIF_REG + 0x20,
SCIF_SCLSR2 = SCIF_REG + 0x24,
};
static void sh_console_putc(int c)
{
/* wait for TDFE: room in the transmit FIFO */
while (!(ctrl_inw(SCIF_SCFSR2) & 0x20))
cpu_relax();
ctrl_outb(c, SCIF_SCFTDR2);
/* clear TDFE/TEND so the status reflects this character */
ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0x9f), SCIF_SCFSR2);
if (c == '\n')
sh_console_putc('\r');
}
static void sh_console_flush(void)
{
/* clear TEND, then wait for it to reassert: all bits shifted out */
ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
while (!(ctrl_inw(SCIF_SCFSR2) & 0x40))
cpu_relax();
ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
}
static void sh_console_write(struct console *con, const char *s, unsigned count)
{
while (count-- > 0)
sh_console_putc(*s++);
sh_console_flush();
}
static int __init sh_console_setup(struct console *con, char *options)
{
con->cflag = CREAD | HUPCL | CLOCAL | B19200 | CS8;
return 0;
}
static struct console sh_console = {
.name = "scifcon",
.write = sh_console_write,
.setup = sh_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
};
void __init enable_early_printk(void)
{
ctrl_outb(0x2a, SCIF_SCBRR2); /* 19200bps */
ctrl_outw(0x04, SCIF_SCFCR2); /* Reset TFRST */
ctrl_outw(0x10, SCIF_SCFCR2); /* TTRG0=1 */
ctrl_outw(0, SCIF_SCSPTR2);
ctrl_outw(0x60, SCIF_SCFSR2);
ctrl_outw(0, SCIF_SCLSR2);
ctrl_outw(0x30, SCIF_SCSCR2);
register_console(&sh_console);
}
void disable_early_printk(void)
{
unregister_console(&sh_console);
}

2103
arch/sh64/kernel/entry.S Normal file

File diff suppressed because it is too large

170
arch/sh64/kernel/fpu.c Normal file

@@ -0,0 +1,170 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/fpu.c
*
* Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
* Copyright (C) 2002 STMicroelectronics Limited
* Author : Stuart Menefy
*
* Started from SH4 version:
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*
*/
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/processor.h>
#include <asm/user.h>
#include <asm/io.h>
/*
* Initially load the FPU with signalling NaNs. This bit pattern
* has the property that, whether interpreted as single or as
* double precision, it still represents a signalling NaN.
*/
#define sNAN64 0xFFFFFFFFFFFFFFFFULL
#define sNAN32 0xFFFFFFFFUL
static union sh_fpu_union init_fpuregs = {
.hard = {
.fp_regs = { [0 ... 63] = sNAN32 },
.fpscr = FPSCR_INIT
}
};
inline void fpsave(struct sh_fpu_hard_struct *fpregs)
{
asm volatile("fst.p %0, (0*8), fp0\n\t"
"fst.p %0, (1*8), fp2\n\t"
"fst.p %0, (2*8), fp4\n\t"
"fst.p %0, (3*8), fp6\n\t"
"fst.p %0, (4*8), fp8\n\t"
"fst.p %0, (5*8), fp10\n\t"
"fst.p %0, (6*8), fp12\n\t"
"fst.p %0, (7*8), fp14\n\t"
"fst.p %0, (8*8), fp16\n\t"
"fst.p %0, (9*8), fp18\n\t"
"fst.p %0, (10*8), fp20\n\t"
"fst.p %0, (11*8), fp22\n\t"
"fst.p %0, (12*8), fp24\n\t"
"fst.p %0, (13*8), fp26\n\t"
"fst.p %0, (14*8), fp28\n\t"
"fst.p %0, (15*8), fp30\n\t"
"fst.p %0, (16*8), fp32\n\t"
"fst.p %0, (17*8), fp34\n\t"
"fst.p %0, (18*8), fp36\n\t"
"fst.p %0, (19*8), fp38\n\t"
"fst.p %0, (20*8), fp40\n\t"
"fst.p %0, (21*8), fp42\n\t"
"fst.p %0, (22*8), fp44\n\t"
"fst.p %0, (23*8), fp46\n\t"
"fst.p %0, (24*8), fp48\n\t"
"fst.p %0, (25*8), fp50\n\t"
"fst.p %0, (26*8), fp52\n\t"
"fst.p %0, (27*8), fp54\n\t"
"fst.p %0, (28*8), fp56\n\t"
"fst.p %0, (29*8), fp58\n\t"
"fst.p %0, (30*8), fp60\n\t"
"fst.p %0, (31*8), fp62\n\t"
"fgetscr fr63\n\t"
"fst.s %0, (32*8), fr63\n\t"
: /* no output */
: "r" (fpregs)
: "memory");
}
static inline void
fpload(struct sh_fpu_hard_struct *fpregs)
{
asm volatile("fld.p %0, (0*8), fp0\n\t"
"fld.p %0, (1*8), fp2\n\t"
"fld.p %0, (2*8), fp4\n\t"
"fld.p %0, (3*8), fp6\n\t"
"fld.p %0, (4*8), fp8\n\t"
"fld.p %0, (5*8), fp10\n\t"
"fld.p %0, (6*8), fp12\n\t"
"fld.p %0, (7*8), fp14\n\t"
"fld.p %0, (8*8), fp16\n\t"
"fld.p %0, (9*8), fp18\n\t"
"fld.p %0, (10*8), fp20\n\t"
"fld.p %0, (11*8), fp22\n\t"
"fld.p %0, (12*8), fp24\n\t"
"fld.p %0, (13*8), fp26\n\t"
"fld.p %0, (14*8), fp28\n\t"
"fld.p %0, (15*8), fp30\n\t"
"fld.p %0, (16*8), fp32\n\t"
"fld.p %0, (17*8), fp34\n\t"
"fld.p %0, (18*8), fp36\n\t"
"fld.p %0, (19*8), fp38\n\t"
"fld.p %0, (20*8), fp40\n\t"
"fld.p %0, (21*8), fp42\n\t"
"fld.p %0, (22*8), fp44\n\t"
"fld.p %0, (23*8), fp46\n\t"
"fld.p %0, (24*8), fp48\n\t"
"fld.p %0, (25*8), fp50\n\t"
"fld.p %0, (26*8), fp52\n\t"
"fld.p %0, (27*8), fp54\n\t"
"fld.p %0, (28*8), fp56\n\t"
"fld.p %0, (29*8), fp58\n\t"
"fld.p %0, (30*8), fp60\n\t"
"fld.s %0, (32*8), fr63\n\t"
"fputscr fr63\n\t"
"fld.p %0, (31*8), fp62\n\t"
: /* no output */
: "r" (fpregs) );
}
void fpinit(struct sh_fpu_hard_struct *fpregs)
{
*fpregs = init_fpuregs.hard;
}
asmlinkage void
do_fpu_error(unsigned long ex, struct pt_regs *regs)
{
struct task_struct *tsk = current;
regs->pc += 4;
tsk->thread.trap_no = 11;
tsk->thread.error_code = 0;
force_sig(SIGFPE, tsk);
}
asmlinkage void
do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
{
void die(const char *str, struct pt_regs *regs, long err);
if (! user_mode(regs))
die("FPU used in kernel", regs, ex);
regs->sr &= ~SR_FD;
if (last_task_used_math == current)
return;
grab_fpu();
if (last_task_used_math != NULL) {
/* Other processes fpu state, save away */
fpsave(&last_task_used_math->thread.fpu.hard);
}
last_task_used_math = current;
if (used_math()) {
fpload(&current->thread.fpu.hard);
} else {
/* First time FPU user. */
fpload(&init_fpuregs.hard);
set_used_math();
}
release_fpu();
}

373
arch/sh64/kernel/head.S Normal file

@@ -0,0 +1,373 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/head.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*
*
* benedict.gaster@superh.com: 2nd May 2002
* Moved definition of empty_zero_page to its own section allowing
* it to be placed at an absolute address known at load time.
*
* lethal@linux-sh.org: 9th May 2003
* Kill off GLOBAL_NAME() usage.
*
* lethal@linux-sh.org: 8th May 2004
* Add early SCIF console DTLB mapping.
*/
#include <linux/config.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/registers.h>
#include <asm/thread_info.h>
/*
* MMU defines: TLB boundaries.
*/
#define MMUIR_FIRST ITLB_FIXED
#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUIR_STEP TLB_STEP
#define MMUDR_FIRST DTLB_FIXED
#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUDR_STEP TLB_STEP
/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1))
#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb"
#endif
/*
* MMU defines: Fixed TLBs.
*/
/* Deal safely with the case where the base of RAM is not 512Mb aligned */
#define ALIGN_512M_MASK (0xffffffffe0000000)
#define ALIGNED_EFFECTIVE ((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
#define MMUDR_CACHED_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L (0x000000000000015a | ALIGNED_PHYSICAL)
/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
#ifdef CONFIG_ICACHE_DISABLED
#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
#else
#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
#endif
#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
#if defined (CONFIG_DCACHE_DISABLED)
#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
#elif defined (CONFIG_DCACHE_WRITE_THROUGH)
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
/* WT, invalidate */
#elif defined (CONFIG_DCACHE_WRITE_BACK)
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
/* WB, invalidate */
#else
#error preprocessor flag CONFIG_DCACHE_... not recognized!
#endif
#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
.section .empty_zero_page, "aw"
.global empty_zero_page
empty_zero_page:
.long 1 /* MOUNT_ROOT_RDONLY */
.long 0 /* RAMDISK_FLAGS */
.long 0x0200 /* ORIG_ROOT_DEV */
.long 1 /* LOADER_TYPE */
.long 0x00800000 /* INITRD_START */
.long 0x00800000 /* INITRD_SIZE */
.long 0
.text
.balign 4096,0,4096
.section .data, "aw"
.balign PAGE_SIZE
.global swapper_pg_dir
swapper_pg_dir:
.space PAGE_SIZE, 0
.global empty_bad_page
empty_bad_page:
.space PAGE_SIZE, 0
.global empty_bad_pte_table
empty_bad_pte_table:
.space PAGE_SIZE, 0
.global fpu_in_use
fpu_in_use: .quad 0
.section .text, "ax"
.balign L1_CACHE_BYTES
/*
* Condition at the entry of __stext:
* . Reset state:
* . SR.FD = 1 (FPU disabled)
* . SR.BL = 1 (Exceptions disabled)
* . SR.MD = 1 (Privileged Mode)
* . SR.MMU = 0 (MMU Disabled)
* . SR.CD = 0 (CTC User Visible)
* . SR.IMASK = Undefined (Interrupt Mask)
*
* Operations supposed to be performed by __stext:
* . prevent speculative fetch onto device memory while MMU is off
* . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
* . first, save CPU state and set it to something harmless
* . any CPU detection and/or endianness settings (?)
* . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
* . set initial TLB entries for cached and uncached regions
* (no fine granularity paging)
* . set initial cache state
* . enable MMU and caches
* . set CPU to a consistent state
* . registers (including stack pointer and current/KCR0)
* . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
* at this stage. This is all to later Linux initialization steps.
* . initialize FPU
* . clear BSS
* . jump into start_kernel()
* . be prepared for the hopeless case where start_kernel() returns.
*
*/
.global _stext
_stext:
/*
* Prevent speculative fetch on device memory due to
* uninitialized target registers.
*/
ptabs/u ZERO, tr0
ptabs/u ZERO, tr1
ptabs/u ZERO, tr2
ptabs/u ZERO, tr3
ptabs/u ZERO, tr4
ptabs/u ZERO, tr5
ptabs/u ZERO, tr6
ptabs/u ZERO, tr7
synci
/*
* Read/Set CPU state. After this block:
* r29 = Initial SR
*/
getcon SR, r29
movi SR_HARMLESS, r20
putcon r20, SR
/*
* Initialize EMI/LMI. To Be Done.
*/
/*
* CPU detection and/or endianness settings (?). To Be Done.
* Pure PIC code here, please ! Just save state into r30.
* After this block:
* r30 = CPU type/Platform Endianness
*/
/*
* Set initial TLB entries for cached and uncached regions.
* Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
*/
/* Clear ITLBs */
pta clear_ITLB, tr1
movi MMUIR_FIRST, r21
movi MMUIR_END, r22
clear_ITLB:
putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
addi r21, MMUIR_STEP, r21
bne r21, r22, tr1
/* Clear DTLBs */
pta clear_DTLB, tr1
movi MMUDR_FIRST, r21
movi MMUDR_END, r22
clear_DTLB:
putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
addi r21, MMUDR_STEP, r21
bne r21, r22, tr1
/* Map one big (512Mb) page for ITLB */
movi MMUIR_FIRST, r21
movi MMUIR_TEXT_L, r22 /* PTEL first */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
movi MMUIR_TEXT_H, r22 /* PTEH last */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
/* Map one big CACHED (512Mb) page for DTLB */
movi MMUDR_FIRST, r21
movi MMUDR_CACHED_L, r22 /* PTEL first */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
movi MMUDR_CACHED_H, r22 /* PTEH last */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
#ifdef CONFIG_EARLY_PRINTK
/*
* Setup a DTLB translation for SCIF phys.
*/
addi r21, MMUDR_STEP, r21
movi 0x0a03, r22 /* SCIF phys */
shori 0x0148, r22
putcfg r21, 1, r22 /* PTEL first */
movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
shori 0x0003, r22
putcfg r21, 0, r22 /* PTEH last */
#endif
/*
* Set cache behaviours.
*/
/* ICache */
movi ICCR_BASE, r21
movi ICCR0_INIT_VAL, r22
movi ICCR1_INIT_VAL, r23
putcfg r21, ICCR_REG0, r22
putcfg r21, ICCR_REG1, r23
/* OCache */
movi OCCR_BASE, r21
movi OCCR0_INIT_VAL, r22
movi OCCR1_INIT_VAL, r23
putcfg r21, OCCR_REG0, r22
putcfg r21, OCCR_REG1, r23
/*
* Enable Caches and MMU. Do the first non-PIC jump.
* Now head.S global variables, constants and externs
* can be used.
*/
getcon SR, r21
movi SR_ENABLE_MMU, r22
or r21, r22, r21
putcon r21, SSR
movi hyperspace, r22
ori r22, 1, r22 /* Make it SHmedia, not required but..*/
putcon r22, SPC
synco
rte /* And now go into the hyperspace ... */
hyperspace: /* ... that's the next instruction ! */
/*
* Set CPU to a consistent state.
* r31 = FPU support flag
* tr0/tr7 in use. Others give a chance to loop somewhere safe
*/
movi start_kernel, r32
ori r32, 1, r32
ptabs r32, tr0 /* r32 = _start_kernel address */
pta/u hopeless, tr1
pta/u hopeless, tr2
pta/u hopeless, tr3
pta/u hopeless, tr4
pta/u hopeless, tr5
pta/u hopeless, tr6
pta/u hopeless, tr7
gettr tr1, r28 /* r28 = hopeless address */
/* Set initial stack pointer */
movi init_thread_union, SP
putcon SP, KCR0 /* Set current to init_task */
movi THREAD_SIZE, r22 /* Point to the end */
add SP, r22, SP
/*
* Initialize FPU.
* Keep FPU flag in r31. After this block:
* r31 = FPU flag
*/
movi fpu_in_use, r31 /* Temporary */
#ifdef CONFIG_SH_FPU
getcon SR, r21
movi SR_ENABLE_FPU, r22
and r21, r22, r22
putcon r22, SR /* Try to enable */
getcon SR, r22
xor r21, r22, r21
shlri r21, 15, r21 /* Supposedly 0/1 */
st.q r31, 0 , r21 /* Set fpu_in_use */
#else
movi 0, r21
st.q r31, 0 , r21 /* Set fpu_in_use */
#endif
or r21, ZERO, r31 /* Set FPU flag at last */
#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
remote memory via SHdebug link, etc. For these the memory can be guaranteed
to be all zero on boot anyway. */
/*
* Clear bss
*/
pta clear_quad, tr1
movi __bss_start, r22
movi _end, r23
clear_quad:
st.q r22, 0, ZERO
addi r22, 8, r22
bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
#endif
pta/u hopeless, tr1
/* Say bye to head.S but be prepared to wrongly get back ... */
blink tr0, LINK
/* If we ever get back here through LINK/tr1-tr7 */
pta/u hopeless, tr7
hopeless:
/*
* Something's badly wrong here. Loop endlessly,
* there's nothing more we can do about it.
*
* Note on hopeless: it can be jumped into either
* before or after jumping into hyperspace. The only
* requirement is that it be reached PIC (PTA) before,
* and either way (PTA/PTABS) after. Given the virtual-
* to-physical mapping, a simulator/emulator can easily
* tell where we came from just by looking at the
* hopeless (PC) address.
*
* For debugging purposes:
* (r28) hopeless/loop address
* (r29) Original SR
* (r30) CPU type/Platform endianness
* (r31) FPU Support
* (r32) _start_kernel address
*/
blink tr7, ZERO

46
arch/sh64/kernel/init_task.c Normal file

@@ -0,0 +1,46 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/init_task.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
*/
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
struct pt_regs fake_swapper_regs;
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE-byte aligned due
* to the way process stacks are handled. This is done by having a
* special "init_task" linker map entry..
*/
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);

116
arch/sh64/kernel/irq.c Normal file

@@ -0,0 +1,116 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/irq.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
*/
/*
* IRQs are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <linux/irq.h>
void ack_bad_irq(unsigned int irq)
{
printk("unexpected IRQ trap at irq %02x\n", irq);
}
#if defined(CONFIG_PROC_FS)
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
struct irqaction * action;
unsigned long flags;
if (i == 0) {
seq_puts(p, " ");
for (j=0; j<NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "CPU%d ",j);
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
seq_printf(p, "%3d: ",i);
seq_printf(p, "%10u ", kstat_irqs(i));
seq_printf(p, " %14s", irq_desc[i].handler->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
unlock:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
#endif
/*
* do_NMI handles all Non-Maskable Interrupts.
*/
asmlinkage void do_NMI(unsigned long vector_num, struct pt_regs * regs)
{
if (regs->sr & 0x40000000)
printk("unexpected NMI trap in system mode\n");
else
printk("unexpected NMI trap in user mode\n");
/* No statistics */
}
/*
* do_IRQ handles all normal device IRQ's.
*/
asmlinkage int do_IRQ(unsigned long vector_num, struct pt_regs * regs)
{
int irq;
irq_enter();
irq = irq_demux(vector_num);
if (irq >= 0) {
__do_IRQ(irq, regs);
} else {
printk("unexpected IRQ trap at vector %03lx\n", vector_num);
}
irq_exit();
return 1;
}

272
arch/sh64/kernel/irq_intc.c Normal file

@@ -0,0 +1,272 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/irq_intc.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
* Interrupt Controller support for SH5 INTC.
* Per-interrupt selective priority. IRLM=0 (fixed priority) is
* not supported, as it is useless without a cascaded interrupt
* controller.
*
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/bitops.h> /* this also includes <asm/registers.h>,
which is required to remap the register
names used in __asm__ blocks */
#include <asm/hardware.h>
#include <asm/platform.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/irq.h>
/*
* Maybe the generic Peripheral block could move to a more
* generic include file. INTC Block will be defined here
* and only here to make INTC self-contained in a single
* file.
*/
#define INTC_BLOCK_OFFSET 0x01000000
/* Base */
#define INTC_BASE (PHYS_PERIPHERAL_BLOCK + INTC_BLOCK_OFFSET)
/* Address */
#define INTC_ICR_SET (intc_virt + 0x0)
#define INTC_ICR_CLEAR (intc_virt + 0x8)
#define INTC_INTPRI_0 (intc_virt + 0x10)
#define INTC_INTSRC_0 (intc_virt + 0x50)
#define INTC_INTSRC_1 (intc_virt + 0x58)
#define INTC_INTREQ_0 (intc_virt + 0x60)
#define INTC_INTREQ_1 (intc_virt + 0x68)
#define INTC_INTENB_0 (intc_virt + 0x70)
#define INTC_INTENB_1 (intc_virt + 0x78)
#define INTC_INTDSB_0 (intc_virt + 0x80)
#define INTC_INTDSB_1 (intc_virt + 0x88)
#define INTC_ICR_IRLM 0x1
#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
/*
* Mapper between the vector ordinal and the IRQ number
* passed to kernel/device drivers.
*/
int intc_evt_to_irq[(0xE20/0x20)+1] = {
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
-1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
-1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
-1, -1 /* 0xE00 - 0xE20 */
};
/*
* Opposite mapper.
*/
static int IRQ_to_vectorN[NR_INTC_IRQS] = {
0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */
-1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53, /* 8-15 */
0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1, /* 16-23 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 24-31 */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1, /* 40-47 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 48-55 */
-1, -1, -1, -1, -1, -1, -1, 0x2B, /* 56-63 */
};
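/*
 * Sketch (not part of the original file): how the table above is meant
 * to be consumed. INTEVT codes step by 0x20, so the index is the
 * vector divided by 0x20; e.g. vector 0x400 gives
 * intc_evt_to_irq[0x400 / 0x20] == intc_evt_to_irq[32] == IRQ 32.
 * The name intc_evt_demux is hypothetical; the real demux hook is
 * irq_demux(), defined elsewhere in the port.
 */
static inline int intc_evt_demux(unsigned long intevt)
{
	return intc_evt_to_irq[intevt / 0x20];
}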
static unsigned long intc_virt;
static unsigned int startup_intc_irq(unsigned int irq);
static void shutdown_intc_irq(unsigned int irq);
static void enable_intc_irq(unsigned int irq);
static void disable_intc_irq(unsigned int irq);
static void mask_and_ack_intc(unsigned int);
static void end_intc_irq(unsigned int irq);
static struct hw_interrupt_type intc_irq_type = {
"INTC",
startup_intc_irq,
shutdown_intc_irq,
enable_intc_irq,
disable_intc_irq,
mask_and_ack_intc,
end_intc_irq
};
static int irlm; /* IRL mode */
static unsigned int startup_intc_irq(unsigned int irq)
{
enable_intc_irq(irq);
return 0; /* never anything pending */
}
static void shutdown_intc_irq(unsigned int irq)
{
disable_intc_irq(irq);
}
static void enable_intc_irq(unsigned int irq)
{
unsigned long reg;
unsigned long bitmask;
if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
printk("Trying to use straight IRL0-3 with an encoding platform.\n");
if (irq < 32) {
reg = INTC_INTENB_0;
bitmask = 1 << irq;
} else {
reg = INTC_INTENB_1;
bitmask = 1 << (irq - 32);
}
ctrl_outl(bitmask, reg);
}
static void disable_intc_irq(unsigned int irq)
{
unsigned long reg;
unsigned long bitmask;
if (irq < 32) {
reg = INTC_INTDSB_0;
bitmask = 1 << irq;
} else {
reg = INTC_INTDSB_1;
bitmask = 1 << (irq - 32);
}
ctrl_outl(bitmask, reg);
}
static void mask_and_ack_intc(unsigned int irq)
{
disable_intc_irq(irq);
}
static void end_intc_irq(unsigned int irq)
{
enable_intc_irq(irq);
}
/* For future use, if we ever support IRLM=0 */
void make_intc_irq(unsigned int irq)
{
disable_irq_nosync(irq);
irq_desc[irq].handler = &intc_irq_type;
disable_intc_irq(irq);
}
#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
int intc_irq_describe(char* p, int irq)
{
if (irq < NR_INTC_IRQS)
return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
else
return 0;
}
#endif
void __init init_IRQ(void)
{
unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
unsigned long reg;
unsigned long data;
int i;
intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
if (!intc_virt) {
panic("Unable to remap INTC\n");
}
/* Set default: per-line enable/disable, priority driven ack/eoi */
for (i = 0; i < NR_INTC_IRQS; i++) {
if (platform_int_priority[i] != NO_PRIORITY) {
irq_desc[i].handler = &intc_irq_type;
}
}
/* Disable all interrupts and set all priorities to 0 to avoid trouble */
ctrl_outl(-1, INTC_INTDSB_0);
ctrl_outl(-1, INTC_INTDSB_1);
for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
ctrl_outl( NO_PRIORITY, reg);
/* Set IRLM */
/* If all the priorities are set to 'no priority', then
* assume we are using encoded mode.
*/
irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
if (irlm == NO_PRIORITY) {
/* IRLM = 0 */
reg = INTC_ICR_CLEAR;
i = IRQ_INTA;
printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
} else {
/* IRLM = 1 */
reg = INTC_ICR_SET;
i = IRQ_IRL0;
}
ctrl_outl(INTC_ICR_IRLM, reg);
/* Set interrupt priorities according to platform description */
for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
/* After the 8th nibble (index 7), flush to the priority register */
ctrl_outl(data, reg);
data = 0;
reg += 8;
}
}
#ifdef CONFIG_SH_CAYMAN
{
extern void init_cayman_irq(void);
init_cayman_irq();
}
#endif
/*
* And now let interrupts come in.
* sti() is not enough, we need to
* lower priority, too.
*/
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}

41
arch/sh64/kernel/led.c Normal file

@@ -0,0 +1,41 @@
/*
* arch/sh64/kernel/led.c
*
* Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com>
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Flash the LEDs
*/
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/sched.h>
void mach_led(int pos, int val);
/* acts like an actual heart beat -- ie thump-thump-pause... */
void heartbeat(void)
{
static unsigned int cnt = 0, period = 0, dist = 0;
if (cnt == 0 || cnt == dist) {
mach_led(-1, 1);
} else if (cnt == 7 || cnt == dist + 7) {
mach_led(-1, 0);
}
if (++cnt > period) {
cnt = 0;
/*
* The hyperbolic function below modifies the heartbeat period
* length depending on the current load (avenrun[0], the 1-minute
* average). It goes through the points f(0)=126, f(1)=86,
* f(5)=51, f(inf)->30.
*/
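/* e.g. at zero load: (672<<FSHIFT)/(7<<FSHIFT) + 30 == 96 + 30 == 126 */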
period = ((672 << FSHIFT) / (5 * avenrun[0] +
(7 << FSHIFT))) + 30;
dist = period / 4;
}
}

161
arch/sh64/kernel/module.c Normal file

@@ -0,0 +1,161 @@
/* Kernel module help for sh64.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Copyright 2004 SuperH (UK) Ltd
Author: Richard Curnow
Based on the sh version, and on code from the sh64-specific parts of
modutils, originally written by Richard Curnow and Ben Gaster.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
void *module_alloc(unsigned long size)
{
if (size == 0)
return NULL;
return vmalloc(size);
}
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception
table entries. */
}
/* We don't need anything special. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
char *secstrings,
struct module *mod)
{
return 0;
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
Elf32_Addr relocation;
uint32_t *location;
int align;
int is_shmedia;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
relocation = sym->st_value + rel[i].r_addend;
align = (int)location & 3;
/* For text addresses, bit2 of the st_other field indicates
* whether the symbol is SHmedia (1) or SHcompact (0). If
* SHmedia, the LSB of the symbol needs to be asserted
* for the CPU to be in SHmedia mode when it starts executing
* the branch target. */
is_shmedia = (sym->st_other & 4) ? 1 : 0;
if (is_shmedia) {
relocation |= 1;
}
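/* The instruction formats patched below keep a 16-bit immediate in
 * bits 10..25 (mask 0x3fffc00), hence the masks and << 10 shifts. */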
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_SH_DIR32:
DEBUGP("R_SH_DIR32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
*location += relocation;
break;
case R_SH_REL32:
DEBUGP("R_SH_REL32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
relocation -= (Elf32_Addr) location;
*location += relocation;
break;
case R_SH_IMM_LOW16:
DEBUGP("R_SH_IMM_LOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
*location = (*location & ~0x3fffc00) |
((relocation & 0xffff) << 10);
break;
case R_SH_IMM_MEDLOW16:
DEBUGP("R_SH_IMM_MEDLOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
*location = (*location & ~0x3fffc00) |
(((relocation >> 16) & 0xffff) << 10);
break;
case R_SH_IMM_LOW16_PCREL:
DEBUGP("R_SH_IMM_LOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
relocation -= (Elf32_Addr) location;
*location = (*location & ~0x3fffc00) |
((relocation & 0xffff) << 10);
break;
case R_SH_IMM_MEDLOW16_PCREL:
DEBUGP("R_SH_IMM_MEDLOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
relocation -= (Elf32_Addr) location;
*location = (*location & ~0x3fffc00) |
(((relocation >> 16) & 0xffff) << 10);
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
me->name);
return -ENOEXEC;
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
return 0;
}
void module_arch_cleanup(struct module *mod)
{
}

50
arch/sh64/kernel/pci-dma.c Normal file

@@ -0,0 +1,50 @@
/*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
* Copyright (C) 2003 Paul Mundt (lethal@linux-sh.org)
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Dynamic DMA mapping support.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/io.h>
void *consistent_alloc(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
void *ret;
int gfp = GFP_ATOMIC;
void *vp;
if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
gfp |= GFP_DMA;
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret == NULL)
return NULL;
/* now call our friend ioremap_nocache to give us an uncached area */
vp = ioremap_nocache(virt_to_phys(ret), size);
if (vp != NULL) {
memset(vp, 0, size);
*dma_handle = virt_to_bus(ret);
dma_cache_wback_inv((unsigned long)ret, size);
} else {
/* remap failed: don't leak the pages */
free_pages((unsigned long)ret, get_order(size));
}
return vp;
}
void consistent_free(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
void *alloc;
alloc = bus_to_virt((unsigned long)dma_handle);
free_pages((unsigned long)alloc, get_order(size));
iounmap(vaddr);
}

541
arch/sh64/kernel/pci_sh5.c Normal file

@@ -0,0 +1,541 @@
/*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Support functions for the SH5 PCI hardware.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <asm/pci.h>
#include <linux/irq.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include "pci_sh5.h"
static unsigned long pcicr_virt;
unsigned long pciio_virt;
static void __init pci_fixup_ide_bases(struct pci_dev *d)
{
int i;
/*
* PCI IDE controllers use non-standard I/O port decoding, respect it.
*/
if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
return;
printk("PCI: IDE base address fixup for %s\n", pci_name(d));
for(i=0; i<4; i++) {
struct resource *r = &d->resource[i];
if ((r->start & ~0x80) == 0x374) {
r->start |= 2;
r->end = r->start;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
char * __init pcibios_setup(char *str)
{
return str;
}
/* Rounds a number UP to the nearest power of two. Used for
* sizing the PCI window.
*/
static u32 __init r2p2(u32 num)
{
int i = 31;
u32 tmp = num;
if (num == 0)
return 0;
do {
if (tmp & (1 << 31))
break;
i--;
tmp <<= 1;
} while (i >= 0);
tmp = 1 << i;
/* If the original number isn't a power of 2, round it up */
if (tmp != num)
tmp <<= 1;
return tmp;
}
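/* e.g. r2p2(96 << 20) == 128 << 20; exact powers of two (and 0) are
 * returned unchanged. */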
extern unsigned long long memory_start, memory_end;
int __init sh5pci_init(unsigned memStart, unsigned memSize)
{
u32 lsr0;
u32 uval;
pcicr_virt = onchip_remap(SH5PCI_ICR_BASE, 1024, "PCICR");
if (!pcicr_virt) {
panic("Unable to remap PCICR\n");
}
pciio_virt = onchip_remap(SH5PCI_IO_BASE, 0x10000, "PCIIO");
if (!pciio_virt) {
panic("Unable to remap PCIIO\n");
}
pr_debug("Register base addres is 0x%08lx\n", pcicr_virt);
/* Clear snoop registers */
SH5PCI_WRITE(CSCR0, 0);
SH5PCI_WRITE(CSCR1, 0);
pr_debug("Wrote to reg\n");
/* Switch off interrupts */
SH5PCI_WRITE(INTM, 0);
SH5PCI_WRITE(AINTM, 0);
SH5PCI_WRITE(PINTM, 0);
/* Set bus active, take it out of reset */
uval = SH5PCI_READ(CR);
/* Set command Register */
SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE | CR_PFCS | CR_BMAM);
uval=SH5PCI_READ(CR);
pr_debug("CR is actually 0x%08x\n",uval);
/* Allow it to be a master */
/* NB - WE DISABLE I/O ACCESS to stop overlap */
/* set WAIT bit to enable stepping, an attempt to improve stability */
SH5PCI_WRITE_SHORT(CSR_CMD,
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_WAIT);
/*
** Set translation mapping memory in order to convert the address
** used for the main bus, to the PCI internal address.
*/
SH5PCI_WRITE(MBR,0x40000000);
/* Always set the max size 512M */
SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
/*
** I/O addresses are mapped at an internal PCI-specific address,
** as described in the configuration bridge table. These are
** changed to 0 to allow cards with legacy I/O (such as VGA) to
** function correctly. We set the SH5 IOBAR to 256K, which is a
** bit big as we can only have 64K of address space.
*/
SH5PCI_WRITE(IOBR,0x0);
pr_debug("PCI:Writing 0x%08x to IOBR\n",0);
/* Set up a 256K window. Totally pointless waste of address space */
SH5PCI_WRITE(IOBMR,0);
pr_debug("PCI:Writing 0x%08x to IOBMR\n",0);
/* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec. Ideally,
* we would want to map the I/O region somewhere, but it is so big this is not
* that easy!
*/
SH5PCI_WRITE(CSR_IBAR0,~0);
/* Set memory size value */
memSize = memory_end - memory_start;
/* Now we set up the mbars so the PCI bus can see the memory of the machine */
if (memSize < (1024 * 1024)) {
printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%x?\n", memSize);
return -EINVAL;
}
/* Set LSR 0 */
lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 : ((r2p2(memSize) - 0x100000) | 0x1);
SH5PCI_WRITE(LSR0, lsr0);
pr_debug("PCI:Writing 0x%08x to LSR0\n",lsr0);
/* Set MBAR 0 */
SH5PCI_WRITE(CSR_MBAR0, memory_start);
SH5PCI_WRITE(LAR0, memory_start);
SH5PCI_WRITE(CSR_MBAR1,0);
SH5PCI_WRITE(LAR1,0);
SH5PCI_WRITE(LSR1,0);
pr_debug("PCI:Writing 0x%08llx to CSR_MBAR0\n",memory_start);
pr_debug("PCI:Writing 0x%08llx to LAR0\n",memory_start);
/* Enable the PCI interrupts on the device */
SH5PCI_WRITE(INTM, ~0);
SH5PCI_WRITE(AINTM, ~0);
SH5PCI_WRITE(PINTM, ~0);
pr_debug("Switching on all error interrupts\n");
return 0;
}
static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
switch (size) {
case 1:
*val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
break;
case 2:
*val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
break;
case 4:
*val = SH5PCI_READ(PDR);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
switch (size) {
case 1:
SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
break;
case 2:
SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
break;
case 4:
SH5PCI_WRITE(PDR, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_config_ops = {
.read = sh5pci_read,
.write = sh5pci_write,
};
/* Everything hangs off this */
static struct pci_bus *pci_root_bus;
static u8 __init no_swizzle(struct pci_dev *dev, u8 * pin)
{
pr_debug("swizzle for dev %d on bus %d slot %d pin is %d\n",
dev->devfn,dev->bus->number, PCI_SLOT(dev->devfn),*pin);
return PCI_SLOT(dev->devfn);
}
static inline u8 bridge_swizzle(u8 pin, u8 slot)
{
return (((pin-1) + slot) % 4) + 1;
}
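/* Example: a device in slot 2 raising INTB (pin 2) emerges upstream as
 * (((2-1) + 2) % 4) + 1 == 4, i.e. INTD, the standard PCI-PCI bridge
 * interrupt rotation. */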
u8 __init common_swizzle(struct pci_dev *dev, u8 *pinp)
{
if (dev->bus->number != 0) {
u8 pin = *pinp;
do {
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
/* Move up the chain of bridges. */
dev = dev->bus->self;
} while (dev->bus->self);
*pinp = pin;
/* The slot is the slot of the last bridge. */
}
return PCI_SLOT(dev->devfn);
}
/* This needs to be shunted out of here into the board specific bit */
static int __init map_cayman_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
int result = -1;
/* The complication here is that the PCI IRQ lines from the Cayman's 2
5V slots get into the CPU via a different path from the IRQ lines
from the 3 3.3V slots. Thus, we have to detect whether the card's
interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling'
at the point where we cross from 5V to 3.3V is not the normal case.
The added complication is that we don't know that the 5V slots are
always bus 2, because a card containing a PCI-PCI bridge may be
plugged into a 3.3V slot, and this changes the bus numbering.
Also, the Cayman has an intermediate PCI bus that goes to a custom
expansion board header (and to the secondary bridge). This bus has
never been used in practice.
The primary onboard PCI-PCI bridge is device 3 on bus 0.
The secondary onboard PCI-PCI bridge is device 0 on the secondary bus of the primary bridge.
*/
struct slot_pin {
int slot;
int pin;
} path[4];
int i=0;
while (dev->bus->number > 0) {
slot = path[i].slot = PCI_SLOT(dev->devfn);
pin = path[i].pin = bridge_swizzle(pin, slot);
dev = dev->bus->self;
i++;
if (i > 3) panic("PCI path to root bus too long!\n");
}
slot = PCI_SLOT(dev->devfn);
/* This is the slot on bus 0 through which the device is eventually
reachable. */
/* Now work back up. */
if ((slot < 3) || (i == 0)) {
/* Bus 0 (incl. PCI-PCI bridge itself) : perform the final
swizzle now. */
result = IRQ_INTA + bridge_swizzle(pin, slot) - 1;
} else {
i--;
slot = path[i].slot;
pin = path[i].pin;
if (slot > 0) {
panic("PCI expansion bus device found - not handled!\n");
} else {
if (i > 0) {
/* 5V slots */
i--;
slot = path[i].slot;
pin = path[i].pin;
/* 'pin' was swizzled earlier wrt slot, don't do it again. */
result = IRQ_P2INTA + (pin - 1);
} else {
/* IRQ for the secondary PCI-PCI bridge: unused */
result = -1;
}
}
}
return result;
}
irqreturn_t pcish5_err_irq(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned pci_int, pci_air, pci_cir, pci_aint;
pci_int = SH5PCI_READ(INT);
pci_cir = SH5PCI_READ(CIR);
pci_air = SH5PCI_READ(AIR);
if (pci_int) {
printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
printk("PCI AIR -> 0x%x\n", pci_air);
printk("PCI CIR -> 0x%x\n", pci_cir);
SH5PCI_WRITE(INT, ~0);
}
pci_aint = SH5PCI_READ(AINT);
if (pci_aint) {
printk("PCI ARB INTERRUPT!\n");
printk("PCI AINT -> 0x%x\n", pci_aint);
printk("PCI AIR -> 0x%x\n", pci_air);
printk("PCI CIR -> 0x%x\n", pci_cir);
SH5PCI_WRITE(AINT, ~0);
}
return IRQ_HANDLED;
}
irqreturn_t pcish5_serr_irq(int irq, void *dev_id, struct pt_regs *regs)
{
printk("SERR IRQ\n");
return IRQ_NONE;
}
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
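/* e.g. ROUND_UP(0x1400, 0x1000) == 0x2000; already-aligned values are
 * returned unchanged. */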
static void __init
pcibios_size_bridge(struct pci_bus *bus, struct resource *ior,
struct resource *memr)
{
struct resource io_res, mem_res;
struct pci_dev *dev;
struct pci_dev *bridge = bus->self;
struct list_head *ln;
if (!bridge)
return; /* host bridge, nothing to do */
/* set reasonable default locations for pcibios_align_resource */
io_res.start = PCIBIOS_MIN_IO;
mem_res.start = PCIBIOS_MIN_MEM;
io_res.end = io_res.start;
mem_res.end = mem_res.start;
/* Collect information about how our direct children are laid out. */
for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) {
int i;
dev = pci_dev_b(ln);
/* Skip bridges for now */
if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
continue;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource res;
unsigned long size;
memcpy(&res, &dev->resource[i], sizeof(res));
size = res.end - res.start + 1;
if (res.flags & IORESOURCE_IO) {
res.start = io_res.end;
pcibios_align_resource(dev, &res, size, 0);
io_res.end = res.start + size;
} else if (res.flags & IORESOURCE_MEM) {
res.start = mem_res.end;
pcibios_align_resource(dev, &res, size, 0);
mem_res.end = res.start + size;
}
}
}
/* And for all of the subordinate busses. */
for (ln=bus->children.next; ln != &bus->children; ln=ln->next)
pcibios_size_bridge(pci_bus_b(ln), &io_res, &mem_res);
/* turn the ending locations into sizes (subtract start) */
io_res.end -= io_res.start;
mem_res.end -= mem_res.start;
/* Align the sizes up by bridge rules */
io_res.end = ROUND_UP(io_res.end, 4*1024) - 1;
mem_res.end = ROUND_UP(mem_res.end, 1*1024*1024) - 1;
/* Adjust the bridge's allocation requirements */
bridge->resource[0].end = bridge->resource[0].start + io_res.end;
bridge->resource[1].end = bridge->resource[1].start + mem_res.end;
bridge->resource[PCI_BRIDGE_RESOURCES].end =
bridge->resource[PCI_BRIDGE_RESOURCES].start + io_res.end;
bridge->resource[PCI_BRIDGE_RESOURCES+1].end =
bridge->resource[PCI_BRIDGE_RESOURCES+1].start + mem_res.end;
/* adjust parent's resource requirements */
if (ior) {
ior->end = ROUND_UP(ior->end, 4*1024);
ior->end += io_res.end;
}
if (memr) {
memr->end = ROUND_UP(memr->end, 1*1024*1024);
memr->end += mem_res.end;
}
}
#undef ROUND_UP
static void __init pcibios_size_bridges(void)
{
struct resource io_res, mem_res;
memset(&io_res, 0, sizeof(io_res));
memset(&mem_res, 0, sizeof(mem_res));
pcibios_size_bridge(pci_root_bus, &io_res, &mem_res);
}
static int __init pcibios_init(void)
{
if (request_irq(IRQ_ERR, pcish5_err_irq,
SA_INTERRUPT, "PCI Error",NULL) < 0) {
printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
return -EINVAL;
}
if (request_irq(IRQ_SERR, pcish5_serr_irq,
SA_INTERRUPT, "PCI SERR interrupt", NULL) < 0) {
printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
return -EINVAL;
}
/* The PCI subsystem needs to know where memory is and how much
* of it there is. I've simply made these globals. A better mechanism
* is probably needed.
*/
sh5pci_init(__pa(memory_start),
__pa(memory_end) - __pa(memory_start));
pci_root_bus = pci_scan_bus(0, &pci_config_ops, NULL);
pcibios_size_bridges();
pci_assign_unassigned_resources();
pci_fixup_irqs(no_swizzle, map_cayman_irq);
return 0;
}
subsys_initcall(pcibios_init);
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
int i;
#if 1
if(dev) {
for(i=0; i<3; i++) {
bus->resource[i] =
&dev->resource[PCI_BRIDGE_RESOURCES+i];
bus->resource[i]->name = bus->name;
}
bus->resource[0]->flags |= IORESOURCE_IO;
bus->resource[1]->flags |= IORESOURCE_MEM;
/* For now, propagate host limits to the bus;
* we'll adjust them later. */
#if 1
bus->resource[0]->end = 64*1024 - 1 ;
bus->resource[1]->end = PCIBIOS_MIN_MEM+(256*1024*1024)-1;
bus->resource[0]->start = PCIBIOS_MIN_IO;
bus->resource[1]->start = PCIBIOS_MIN_MEM;
#else
bus->resource[0]->end = 0;
bus->resource[1]->end = 0;
bus->resource[0]->start = 0;
bus->resource[1]->start = 0;
#endif
/* Turn off downstream PF memory address range by default */
bus->resource[2]->start = 1024*1024;
bus->resource[2]->end = bus->resource[2]->start - 1;
}
#endif
}

107
arch/sh64/kernel/pci_sh5.h Normal file

@@ -0,0 +1,107 @@
/*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
 * Definitions for the SH5 PCI hardware.
*/
/* Product ID */
#define PCISH5_PID 0x350d
/* vendor ID */
#define PCISH5_VID 0x1054
/* Configuration types */
#define ST_TYPE0 0x00 /* Configuration cycle type 0 */
#define ST_TYPE1 0x01 /* Configuration cycle type 1 */
/* VCR data */
#define PCISH5_VCR_STATUS 0x00
#define PCISH5_VCR_VERSION 0x08
/*
** ICR register offsets and bits
*/
#define PCISH5_ICR_CR 0x100 /* PCI control register values */
#define CR_PBAM (1<<12)
#define CR_PFCS (1<<11)
#define CR_FTO (1<<10)
#define CR_PFE (1<<9)
#define CR_TBS (1<<8)
#define CR_SPUE (1<<7)
#define CR_BMAM (1<<6)
#define CR_HOST (1<<5)
#define CR_CLKEN (1<<4)
#define CR_SOCS (1<<3)
#define CR_IOCS (1<<2)
#define CR_RSTCTL (1<<1)
#define CR_CFINT (1<<0)
#define CR_LOCK_MASK 0xa5000000
#define PCISH5_ICR_INT 0x114 /* Interrupt register values */
#define INT_MADIM (1<<2)
#define PCISH5_ICR_LSR0 0x104 /* Local space register values */
#define PCISH5_ICR_LSR1 0x108 /* Local space register values */
#define PCISH5_ICR_LAR0 0x10c /* Local address register values */
#define PCISH5_ICR_LAR1 0x110 /* Local address register values */
#define PCISH5_ICR_INTM 0x118 /* Interrupt mask register values */
#define PCISH5_ICR_AIR 0x11c /* Interrupt error address information register values */
#define PCISH5_ICR_CIR 0x120 /* Interrupt error command information register values */
#define PCISH5_ICR_AINT 0x130 /* Interrupt error arbiter interrupt register values */
#define PCISH5_ICR_AINTM 0x134 /* Interrupt error arbiter interrupt mask register values */
#define PCISH5_ICR_BMIR 0x138 /* Interrupt error info register of bus master values */
#define PCISH5_ICR_PAR 0x1c0 /* Pio address register values */
#define PCISH5_ICR_MBR 0x1c4 /* Memory space bank register values */
#define PCISH5_ICR_IOBR 0x1c8 /* I/O space bank register values */
#define PCISH5_ICR_PINT 0x1cc /* power management interrupt register values */
#define PCISH5_ICR_PINTM 0x1d0 /* power management interrupt mask register values */
#define PCISH5_ICR_MBMR 0x1d8 /* memory space bank mask register values */
#define PCISH5_ICR_IOBMR 0x1dc /* I/O space bank mask register values */
#define PCISH5_ICR_CSCR0 0x210 /* PCI cache snoop control register 0 */
#define PCISH5_ICR_CSCR1 0x214 /* PCI cache snoop control register 1 */
#define PCISH5_ICR_PDR 0x220 /* Pio data register values */
/* These are configs space registers */
#define PCISH5_ICR_CSR_VID 0x000 /* Vendor id */
#define PCISH5_ICR_CSR_DID 0x002 /* Device id */
#define PCISH5_ICR_CSR_CMD 0x004 /* Command register */
#define PCISH5_ICR_CSR_STATUS 0x006 /* Status */
#define PCISH5_ICR_CSR_IBAR0 0x010 /* I/O base address register */
#define PCISH5_ICR_CSR_MBAR0 0x014 /* First Memory base address register */
#define PCISH5_ICR_CSR_MBAR1 0x018 /* Second Memory base address register */
/* Base address of registers */
#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
#define SH5PCI_IO_BASE (PHYS_PCI_BLOCK + 0x00800000)
/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG) */
/* Register selection macro */
#define PCISH5_ICR_REG(x) ( pcicr_virt + (PCISH5_ICR_##x))
/* #define PCISH5_VCR_REG(x) ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
/* Write I/O functions */
#define SH5PCI_WRITE(reg,val) ctrl_outl((u32)(val),PCISH5_ICR_REG(reg))
#define SH5PCI_WRITE_SHORT(reg,val) ctrl_outw((u16)(val),PCISH5_ICR_REG(reg))
#define SH5PCI_WRITE_BYTE(reg,val) ctrl_outb((u8)(val),PCISH5_ICR_REG(reg))
/* Read I/O functions */
#define SH5PCI_READ(reg) ctrl_inl(PCISH5_ICR_REG(reg))
#define SH5PCI_READ_SHORT(reg) ctrl_inw(PCISH5_ICR_REG(reg))
#define SH5PCI_READ_BYTE(reg) ctrl_inb(PCISH5_ICR_REG(reg))
/* Set PCI config bits */
#define SET_CONFIG_BITS(bus,devfn,where) ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
/* Build a configuration cycle address from a struct pci_bus */
#define CONFIG_CMD(bus, devfn, where) SET_CONFIG_BITS(bus->number,devfn,where)
/* Size converters */
#define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18)
#define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18)
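/*
 * Illustrative example of the encodings above (values made up):
 *
 *   SET_CONFIG_BITS(1, PCI_DEVFN(2, 0), PCI_VENDOR_ID)
 *     == (1 << 16) | (0x10 << 8) | (0x00 & ~3) | 0x80000000
 *     == 0x80011000
 *
 * i.e. the enable bit in bit 31, bus in bits 23:16, devfn in bits
 * 15:8 and the dword-aligned register offset in the low bits.
 * Similarly, a 256MB memory bank gives PCISH5_MEM_SIZCONV(0x10000000)
 * == ((0x10000000 / 0x40000) - 1) << 18 == 0x0ffc0000.
 */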

168
arch/sh64/kernel/pcibios.c Normal file

@@ -0,0 +1,168 @@
/*
* $Id: pcibios.c,v 1.1 2001/08/24 12:38:19 dwmw2 Exp $
*
 * arch/sh64/kernel/pcibios.c
*
* Copyright (C) 2002 STMicroelectronics Limited
* Author : David J. McKay
*
* Copyright (C) 2004 Richard Curnow, SuperH UK Limited
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
* This is GPL'd.
*
* Provided here are generic versions of:
* pcibios_update_resource()
* pcibios_align_resource()
* pcibios_enable_device()
* pcibios_set_master()
* pcibios_update_irq()
*
* These functions are collected here to reduce duplication of common
* code amongst the many platform-specific PCI support code files.
*
* Platform-specific files are expected to provide:
* pcibios_fixup_bus()
* pcibios_init()
* pcibios_setup()
* pcibios_fixup_pbus_ranges()
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
void
pcibios_update_resource(struct pci_dev *dev, struct resource *root,
struct resource *res, int resource)
{
u32 new, check;
int reg;
new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
if (resource < 6) {
reg = PCI_BASE_ADDRESS_0 + 4*resource;
} else if (resource == PCI_ROM_RESOURCE) {
res->flags |= IORESOURCE_ROM_ENABLE;
new |= PCI_ROM_ADDRESS_ENABLE;
reg = dev->rom_base_reg;
} else {
		/* Somebody might have asked for allocation of a non-standard resource */
return;
}
pci_write_config_dword(dev, reg, new);
pci_read_config_dword(dev, reg, &check);
if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
printk(KERN_ERR "PCI: Error while updating region "
"%s/%d (%08x != %08x)\n", pci_name(dev), resource,
new, check);
}
}
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*/
void pcibios_align_resource(void *data, struct resource *res,
unsigned long size, unsigned long align)
{
if (res->flags & IORESOURCE_IO) {
unsigned long start = res->start;
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
res->start = start;
}
}
}
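/*
 * Worked example: a candidate I/O start of 0x1100 has 0x100 set within
 * the 0x300 test mask, so it is rounded up to the next 1KB boundary,
 * (0x1100 + 0x3ff) & ~0x3ff == 0x1400, which is back in the safe
 * 0x000-0x0ff range modulo 0x400.
 */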
static void pcibios_enable_bridge(struct pci_dev *dev)
{
struct pci_bus *bus = dev->subordinate;
u16 cmd, old_cmd;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
if (bus->resource[0]->flags & IORESOURCE_IO) {
cmd |= PCI_COMMAND_IO;
}
if ((bus->resource[1]->flags & IORESOURCE_MEM) ||
(bus->resource[2]->flags & IORESOURCE_PREFETCH)) {
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
printk("PCI bridge %s, command register -> %04x\n",
pci_name(dev), cmd);
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
int idx;
struct resource *r;
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
pcibios_enable_bridge(dev);
}
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for(idx=0; idx<6; idx++) {
if (!(mask & (1 << idx)))
continue;
r = &dev->resource[idx];
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (dev->resource[PCI_ROM_RESOURCE].start)
cmd |= PCI_COMMAND_MEMORY;
if (cmd != old_cmd) {
printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
/*
* If we set up a device for bus mastering, we need to check and set
* the latency timer as it may not be properly set.
*/
unsigned int pcibios_max_latency = 255;
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat < 16)
lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
else if (lat > pcibios_max_latency)
lat = pcibios_max_latency;
else
return;
printk(KERN_INFO "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
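/*
 * Examples of the clamping above (with the default maximum of 255):
 * a device latency of 8 is raised to 64, a latency of 32 is left
 * alone (early return), and a latency of 255 under a lowered
 * pcibios_max_latency of 128 is cut down to 128.
 */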
void __init pcibios_update_irq(struct pci_dev *dev, int irq)
{
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}

962
arch/sh64/kernel/process.c Normal file

@@ -0,0 +1,962 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/process.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* Started from SH3/4 version:
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*
* In turn started from i386 version:
* Copyright (C) 1995 Linus Torvalds
*
*/
/*
* This file handles the architecture-dependent parts of process handling..
*/
/* Temporary flags/tests. All to be removed/undefined. BEGIN */
#define IDLE_TRACE
#define VM_SHOW_TABLES
#define VM_TEST_FAULT
#define VM_TEST_RTLBMISS
#define VM_TEST_WTLBMISS
#undef VM_SHOW_TABLES
#undef IDLE_TRACE
/* Temporary flags/tests. All to be removed/undefined. END */
#define __KERNEL_SYSCALLS__
#include <stdarg.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/unistd.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h> /* includes also <asm/registers.h> */
#include <asm/mmu_context.h>
#include <asm/elf.h>
#include <asm/page.h>
#include <linux/irq.h>
struct task_struct *last_task_used_math = NULL;
#ifdef IDLE_TRACE
#ifdef VM_SHOW_TABLES
/* For testing */
static void print_PTE(long base)
{
int i, skip=0;
long long x, y, *p = (long long *) base;
for (i=0; i< 512; i++, p++){
if (*p == 0) {
if (!skip) {
skip++;
printk("(0s) ");
}
} else {
skip=0;
x = (*p) >> 32;
y = (*p) & 0xffffffff;
printk("%08Lx%08Lx ", x, y);
if (!((i+1)&0x3)) printk("\n");
}
}
}
/* For testing */
static void print_DIR(long base)
{
int i, skip=0;
long *p = (long *) base;
for (i=0; i< 512; i++, p++){
if (*p == 0) {
if (!skip) {
skip++;
printk("(0s) ");
}
} else {
skip=0;
printk("%08lx ", *p);
if (!((i+1)&0x7)) printk("\n");
}
}
}
/* For testing */
static void print_vmalloc_first_tables(void)
{
#define PRESENT 0x800 /* Bit 11 */
/*
* Do it really dirty by looking at raw addresses,
* raw offsets, no types. If we used pgtable/pgalloc
* macros/definitions we could hide potential bugs.
*
* Note that pointers are 32-bit for CDC.
*/
long pgdt, pmdt, ptet;
pgdt = (long) &swapper_pg_dir;
printk("-->PGD (0x%08lx):\n", pgdt);
print_DIR(pgdt);
printk("\n");
/* VMALLOC pool is mapped at 0xc0000000, second (pointer) entry in PGD */
pgdt += 4;
pmdt = (long) (* (long *) pgdt);
if (!(pmdt & PRESENT)) {
printk("No PMD\n");
return;
} else pmdt &= 0xfffff000;
printk("-->PMD (0x%08lx):\n", pmdt);
print_DIR(pmdt);
printk("\n");
/* Get the pmdt displacement for 0xc0000000 */
pmdt += 2048;
/* just look at first two address ranges ... */
/* ... 0xc0000000 ... */
ptet = (long) (* (long *) pmdt);
if (!(ptet & PRESENT)) {
printk("No PTE0\n");
return;
} else ptet &= 0xfffff000;
printk("-->PTE0 (0x%08lx):\n", ptet);
print_PTE(ptet);
printk("\n");
/* ... 0xc0001000 ... */
ptet += 4;
if (!(ptet & PRESENT)) {
printk("No PTE1\n");
return;
} else ptet &= 0xfffff000;
printk("-->PTE1 (0x%08lx):\n", ptet);
print_PTE(ptet);
printk("\n");
}
#else
#define print_vmalloc_first_tables()
#endif /* VM_SHOW_TABLES */
static void test_VM(void)
{
void *a, *b, *c;
#ifdef VM_SHOW_TABLES
printk("Initial PGD/PMD/PTE\n");
#endif
print_vmalloc_first_tables();
printk("Allocating 2 bytes\n");
a = vmalloc(2);
print_vmalloc_first_tables();
printk("Allocating 4100 bytes\n");
b = vmalloc(4100);
print_vmalloc_first_tables();
printk("Allocating 20234 bytes\n");
c = vmalloc(20234);
print_vmalloc_first_tables();
#ifdef VM_TEST_FAULT
/* Here you may want to fault ! */
#ifdef VM_TEST_RTLBMISS
printk("Ready to fault upon read.\n");
	if (* (volatile char *) a) {
		/* Force the read; the value itself is don't-care. */
	}
	printk("RTLBMISSed on area a !\n");
#endif
#ifdef VM_TEST_WTLBMISS
printk("Ready to fault upon write.\n");
*((char *) b) = 'L';
printk("WTLBMISSed on area b !\n");
#endif
#endif /* VM_TEST_FAULT */
printk("Deallocating the 4100 byte chunk\n");
vfree(b);
print_vmalloc_first_tables();
printk("Deallocating the 2 byte chunk\n");
vfree(a);
print_vmalloc_first_tables();
printk("Deallocating the last chunk\n");
vfree(c);
print_vmalloc_first_tables();
}
extern unsigned long volatile jiffies;
int once = 0;
unsigned long old_jiffies;
int pid = -1, pgid = -1;
void idle_trace(void)
{
_syscall0(int, getpid)
_syscall1(int, getpgid, int, pid)
if (!once) {
/* VM allocation/deallocation simple test */
test_VM();
pid = getpid();
printk("Got all through to Idle !!\n");
printk("I'm now going to loop forever ...\n");
printk("Any ! below is a timer tick.\n");
printk("Any . below is a getpgid system call from pid = %d.\n", pid);
old_jiffies = jiffies;
once++;
}
if (old_jiffies != jiffies) {
old_jiffies = jiffies - old_jiffies;
switch (old_jiffies) {
case 1:
printk("!");
break;
case 2:
printk("!!");
break;
case 3:
printk("!!!");
break;
case 4:
printk("!!!!");
break;
default:
printk("(%d!)", (int) old_jiffies);
}
old_jiffies = jiffies;
}
pgid = getpgid(pid);
printk(".");
}
#else
#define idle_trace() do { } while (0)
#endif /* IDLE_TRACE */
static int hlt_counter = 1;
#define HARD_IDLE_TIMEOUT (HZ / 3)
void disable_hlt(void)
{
hlt_counter++;
}
void enable_hlt(void)
{
hlt_counter--;
}
static int __init nohlt_setup(char *__unused)
{
hlt_counter = 1;
return 1;
}
static int __init hlt_setup(char *__unused)
{
hlt_counter = 0;
return 1;
}
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
static inline void hlt(void)
{
if (hlt_counter)
return;
__asm__ __volatile__ ("sleep" : : : "memory");
}
/*
* The idle loop on a uniprocessor SH..
*/
void default_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
if (hlt_counter) {
while (1)
if (need_resched())
break;
} else {
local_irq_disable();
while (!need_resched()) {
local_irq_enable();
idle_trace();
hlt();
local_irq_disable();
}
local_irq_enable();
}
schedule();
}
}
void cpu_idle(void)
{
default_idle();
}
void machine_restart(char * __unused)
{
extern void phys_stext(void);
phys_stext();
}
void machine_halt(void)
{
for (;;);
}
void machine_power_off(void)
{
extern void enter_deep_standby(void);
enter_deep_standby();
}
void show_regs(struct pt_regs * regs)
{
unsigned long long ah, al, bh, bl, ch, cl;
printk("\n");
ah = (regs->pc) >> 32;
al = (regs->pc) & 0xffffffff;
bh = (regs->regs[18]) >> 32;
bl = (regs->regs[18]) & 0xffffffff;
ch = (regs->regs[15]) >> 32;
cl = (regs->regs[15]) & 0xffffffff;
printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->sr) >> 32;
al = (regs->sr) & 0xffffffff;
asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
bh = (bh) >> 32;
bl = (bl) & 0xffffffff;
asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
ch = (ch) >> 32;
cl = (cl) & 0xffffffff;
printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[0]) >> 32;
al = (regs->regs[0]) & 0xffffffff;
bh = (regs->regs[1]) >> 32;
bl = (regs->regs[1]) & 0xffffffff;
ch = (regs->regs[2]) >> 32;
cl = (regs->regs[2]) & 0xffffffff;
printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[3]) >> 32;
al = (regs->regs[3]) & 0xffffffff;
bh = (regs->regs[4]) >> 32;
bl = (regs->regs[4]) & 0xffffffff;
ch = (regs->regs[5]) >> 32;
cl = (regs->regs[5]) & 0xffffffff;
printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[6]) >> 32;
al = (regs->regs[6]) & 0xffffffff;
bh = (regs->regs[7]) >> 32;
bl = (regs->regs[7]) & 0xffffffff;
ch = (regs->regs[8]) >> 32;
cl = (regs->regs[8]) & 0xffffffff;
printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[9]) >> 32;
al = (regs->regs[9]) & 0xffffffff;
bh = (regs->regs[10]) >> 32;
bl = (regs->regs[10]) & 0xffffffff;
ch = (regs->regs[11]) >> 32;
cl = (regs->regs[11]) & 0xffffffff;
printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[12]) >> 32;
al = (regs->regs[12]) & 0xffffffff;
bh = (regs->regs[13]) >> 32;
bl = (regs->regs[13]) & 0xffffffff;
ch = (regs->regs[14]) >> 32;
cl = (regs->regs[14]) & 0xffffffff;
printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[16]) >> 32;
al = (regs->regs[16]) & 0xffffffff;
bh = (regs->regs[17]) >> 32;
bl = (regs->regs[17]) & 0xffffffff;
ch = (regs->regs[19]) >> 32;
cl = (regs->regs[19]) & 0xffffffff;
printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[20]) >> 32;
al = (regs->regs[20]) & 0xffffffff;
bh = (regs->regs[21]) >> 32;
bl = (regs->regs[21]) & 0xffffffff;
ch = (regs->regs[22]) >> 32;
cl = (regs->regs[22]) & 0xffffffff;
printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[23]) >> 32;
al = (regs->regs[23]) & 0xffffffff;
bh = (regs->regs[24]) >> 32;
bl = (regs->regs[24]) & 0xffffffff;
ch = (regs->regs[25]) >> 32;
cl = (regs->regs[25]) & 0xffffffff;
printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[26]) >> 32;
al = (regs->regs[26]) & 0xffffffff;
bh = (regs->regs[27]) >> 32;
bl = (regs->regs[27]) & 0xffffffff;
ch = (regs->regs[28]) >> 32;
cl = (regs->regs[28]) & 0xffffffff;
printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[29]) >> 32;
al = (regs->regs[29]) & 0xffffffff;
bh = (regs->regs[30]) >> 32;
bl = (regs->regs[30]) & 0xffffffff;
ch = (regs->regs[31]) >> 32;
cl = (regs->regs[31]) & 0xffffffff;
printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[32]) >> 32;
al = (regs->regs[32]) & 0xffffffff;
bh = (regs->regs[33]) >> 32;
bl = (regs->regs[33]) & 0xffffffff;
ch = (regs->regs[34]) >> 32;
cl = (regs->regs[34]) & 0xffffffff;
printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[35]) >> 32;
al = (regs->regs[35]) & 0xffffffff;
bh = (regs->regs[36]) >> 32;
bl = (regs->regs[36]) & 0xffffffff;
ch = (regs->regs[37]) >> 32;
cl = (regs->regs[37]) & 0xffffffff;
printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[38]) >> 32;
al = (regs->regs[38]) & 0xffffffff;
bh = (regs->regs[39]) >> 32;
bl = (regs->regs[39]) & 0xffffffff;
ch = (regs->regs[40]) >> 32;
cl = (regs->regs[40]) & 0xffffffff;
printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[41]) >> 32;
al = (regs->regs[41]) & 0xffffffff;
bh = (regs->regs[42]) >> 32;
bl = (regs->regs[42]) & 0xffffffff;
ch = (regs->regs[43]) >> 32;
cl = (regs->regs[43]) & 0xffffffff;
printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[44]) >> 32;
al = (regs->regs[44]) & 0xffffffff;
bh = (regs->regs[45]) >> 32;
bl = (regs->regs[45]) & 0xffffffff;
ch = (regs->regs[46]) >> 32;
cl = (regs->regs[46]) & 0xffffffff;
printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[47]) >> 32;
al = (regs->regs[47]) & 0xffffffff;
bh = (regs->regs[48]) >> 32;
bl = (regs->regs[48]) & 0xffffffff;
ch = (regs->regs[49]) >> 32;
cl = (regs->regs[49]) & 0xffffffff;
printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[50]) >> 32;
al = (regs->regs[50]) & 0xffffffff;
bh = (regs->regs[51]) >> 32;
bl = (regs->regs[51]) & 0xffffffff;
ch = (regs->regs[52]) >> 32;
cl = (regs->regs[52]) & 0xffffffff;
printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[53]) >> 32;
al = (regs->regs[53]) & 0xffffffff;
bh = (regs->regs[54]) >> 32;
bl = (regs->regs[54]) & 0xffffffff;
ch = (regs->regs[55]) >> 32;
cl = (regs->regs[55]) & 0xffffffff;
printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[56]) >> 32;
al = (regs->regs[56]) & 0xffffffff;
bh = (regs->regs[57]) >> 32;
bl = (regs->regs[57]) & 0xffffffff;
ch = (regs->regs[58]) >> 32;
cl = (regs->regs[58]) & 0xffffffff;
printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[59]) >> 32;
al = (regs->regs[59]) & 0xffffffff;
bh = (regs->regs[60]) >> 32;
bl = (regs->regs[60]) & 0xffffffff;
ch = (regs->regs[61]) >> 32;
cl = (regs->regs[61]) & 0xffffffff;
printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[62]) >> 32;
al = (regs->regs[62]) & 0xffffffff;
bh = (regs->tregs[0]) >> 32;
bl = (regs->tregs[0]) & 0xffffffff;
ch = (regs->tregs[1]) >> 32;
cl = (regs->tregs[1]) & 0xffffffff;
printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->tregs[2]) >> 32;
al = (regs->tregs[2]) & 0xffffffff;
bh = (regs->tregs[3]) >> 32;
bl = (regs->tregs[3]) & 0xffffffff;
ch = (regs->tregs[4]) >> 32;
cl = (regs->tregs[4]) & 0xffffffff;
printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->tregs[5]) >> 32;
al = (regs->tregs[5]) & 0xffffffff;
bh = (regs->tregs[6]) >> 32;
bl = (regs->tregs[6]) & 0xffffffff;
ch = (regs->tregs[7]) >> 32;
cl = (regs->tregs[7]) & 0xffffffff;
printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
/*
* If we're in kernel mode, dump the stack too..
*/
if (!user_mode(regs)) {
void show_stack(struct task_struct *tsk, unsigned long *sp);
unsigned long sp = regs->regs[15] & 0xffffffff;
struct task_struct *tsk = get_current();
tsk->thread.kregs = regs;
show_stack(tsk, (unsigned long *)sp);
}
}
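/*
 * The dump above splits every 64-bit value by hand.  A hypothetical
 * helper (illustrative sketch only, not used by this file) could
 * express the pattern once:
 */
#if 0
#define HI32(x) ((unsigned long long)(x) >> 32)
#define LO32(x) ((unsigned long long)(x) & 0xffffffff)

static void show_reg_triplet(const char *n0, unsigned long long v0,
			     const char *n1, unsigned long long v1,
			     const char *n2, unsigned long long v2)
{
	/* Same "%08Lx%08Lx" split as the hand-unrolled code above */
	printk("%s: %08Lx%08Lx %s: %08Lx%08Lx %s: %08Lx%08Lx\n",
	       n0, HI32(v0), LO32(v0),
	       n1, HI32(v1), LO32(v1),
	       n2, HI32(v2), LO32(v2));
}
#endif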
struct task_struct * alloc_task_struct(void)
{
/* Get task descriptor pages */
return (struct task_struct *)
__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
}
void free_task_struct(struct task_struct *p)
{
free_pages((unsigned long) p, get_order(THREAD_SIZE));
}
/*
* Create a kernel thread
*/
/*
* This is the mechanism for creating a new kernel thread.
*
 * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
* who haven't done an "execve()") should use this: it will work within
* a system call from a "real" process, but the process memory space will
* not be free'd until both the parent and the child have exited.
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
/* A bit less processor dependent than older sh ... */
unsigned int reply;
static __inline__ _syscall2(int,clone,unsigned long,flags,unsigned long,newsp)
static __inline__ _syscall1(int,exit,int,ret)
reply = clone(flags | CLONE_VM, 0);
if (!reply) {
/* Child */
reply = exit(fn(arg));
}
return reply;
}
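/*
 * Hypothetical usage sketch (illustrative only): spawn a kernel-only
 * thread running my_worker(); kernel_thread() ORs in CLONE_VM itself.
 */
#if 0
static int my_worker(void *arg)
{
	printk(KERN_DEBUG "worker started, arg=%p\n", arg);
	return 0;
}

static void example_spawn(void)
{
	int pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
	if (pid < 0)
		printk(KERN_ERR "kernel_thread failed: %d\n", pid);
}
#endif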
/*
* Free current thread data structures etc..
*/
void exit_thread(void)
{
/* See arch/sparc/kernel/process.c for the precedent for doing this -- RPC.
The SH-5 FPU save/restore approach relies on last_task_used_math
pointing to a live task_struct. When another task tries to use the
FPU for the 1st time, the FPUDIS trap handling (see
arch/sh64/kernel/fpu.c) will save the existing FPU state to the
FP regs field within last_task_used_math before re-loading the new
task's FPU state (or initialising it if the FPU has been used
before). So if last_task_used_math is stale, and its page has already been
re-allocated for another use, the consequences are rather grim. Unless we
null it here, there is no other path through which it would get safely
nulled. */
#ifdef CONFIG_SH_FPU
if (last_task_used_math == current) {
last_task_used_math = NULL;
}
#endif
}
void flush_thread(void)
{
/* Called by fs/exec.c (flush_old_exec) to remove traces of a
* previously running executable. */
#ifdef CONFIG_SH_FPU
if (last_task_used_math == current) {
last_task_used_math = NULL;
}
/* Force FPU state to be reinitialised after exec */
clear_used_math();
#endif
/* if we are a kernel thread, about to change to user thread,
* update kreg
*/
	if (current->thread.kregs == &fake_swapper_regs) {
current->thread.kregs =
((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
current->thread.uregs = current->thread.kregs;
}
}
void release_thread(struct task_struct *dead_task)
{
/* do nothing */
}
/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
#ifdef CONFIG_SH_FPU
int fpvalid;
struct task_struct *tsk = current;
fpvalid = !!tsk_used_math(tsk);
if (fpvalid) {
if (current == last_task_used_math) {
grab_fpu();
fpsave(&tsk->thread.fpu.hard);
release_fpu();
			last_task_used_math = NULL;
regs->sr |= SR_FD;
}
memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
}
return fpvalid;
#else
return 0; /* Task didn't use the fpu at all. */
#endif
}
asmlinkage void ret_from_fork(void);
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs *childregs;
unsigned long long se; /* Sign extension */
#ifdef CONFIG_SH_FPU
if(last_task_used_math == current) {
grab_fpu();
fpsave(&current->thread.fpu.hard);
release_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
#endif
/* Copy from sh version */
childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p->thread_info )) - 1;
*childregs = *regs;
if (user_mode(regs)) {
childregs->regs[15] = usp;
p->thread.uregs = childregs;
} else {
childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
}
childregs->regs[9] = 0; /* Set return value for child */
childregs->sr |= SR_FD; /* Invalidate FPU flag */
p->thread.sp = (unsigned long) childregs;
p->thread.pc = (unsigned long) ret_from_fork;
/*
* Sign extend the edited stack.
	 * Note that thread.sp and thread.pc will stay
* 32-bit wide and context switch must take care
* of NEFF sign extension.
*/
se = childregs->regs[15];
se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
childregs->regs[15] = se;
return 0;
}
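/*
 * Worked example of the sign extension above (assuming the usual sh64
 * definitions, where NEFF_SIGN selects bit 31 and NEFF_MASK sets the
 * upper 32 bits): a user stack pointer of 0x80001000 has NEFF_SIGN
 * set, so it is widened to the canonical effective address
 * 0xffffffff80001000; a pointer of 0x40001000 passes through
 * unchanged.
 */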
/*
* fill in the user structure for a core dump..
*/
void dump_thread(struct pt_regs * regs, struct user * dump)
{
dump->magic = CMAGIC;
dump->start_code = current->mm->start_code;
dump->start_data = current->mm->start_data;
dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1);
dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT;
dump->u_ssize = (current->mm->start_stack - dump->start_stack +
PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Debug registers will come here. */
dump->regs = *regs;
dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
}
asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs *pregs)
{
return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
}
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs *pregs)
{
if (!newsp)
newsp = pregs->regs[15];
return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
}
/*
* This is trivial, and on the face of it looks like it
* could equally well be done in user mode.
*
* Not so, for quite unobvious reasons - register pressure.
* In user mode vfork() cannot have a stack frame, and if
* done by calling the "clone()" system call directly, you
* do not have enough call-clobbered registers to hold all
* the information you need.
*/
asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs *pregs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
}
/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(char *ufilename, char **uargv,
char **uenvp, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs *pregs)
{
int error;
char *filename;
lock_kernel();
filename = getname((char __user *)ufilename);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename,
(char __user * __user *)uargv,
(char __user * __user *)uenvp,
pregs);
if (error == 0) {
task_lock(current);
current->ptrace &= ~PT_DTRACE;
task_unlock(current);
}
putname(filename);
out:
unlock_kernel();
return error;
}
/*
* These bracket the sleeping functions..
*/
extern void interruptible_sleep_on(wait_queue_head_t *q);
#define mid_sched ((unsigned long) interruptible_sleep_on)
static int in_sh64_switch_to(unsigned long pc)
{
extern char __sh64_switch_to_end;
/* For a sleeping task, the PC is somewhere in the middle of the function,
so we don't have to worry about masking the LSB off */
return (pc >= (unsigned long) sh64_switch_to) &&
(pc < (unsigned long) &__sh64_switch_to_end);
}
unsigned long get_wchan(struct task_struct *p)
{
unsigned long schedule_fp;
unsigned long sh64_switch_to_fp;
unsigned long schedule_caller_pc;
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* The same comment as on the Alpha applies here, too ...
*/
pc = thread_saved_pc(p);
#ifdef CONFIG_FRAME_POINTER
if (in_sh64_switch_to(pc)) {
sh64_switch_to_fp = (long) p->thread.sp;
/* r14 is saved at offset 4 in the sh64_switch_to frame */
schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
/* and the caller of 'schedule' is (currently!) saved at offset 24
in the frame of schedule (from disasm) */
schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
return schedule_caller_pc;
}
#endif
return pc;
}
/* Provide a /proc/asids file that lists out the
ASIDs currently associated with the processes. (If the DM.PC register is
examined through the debug link, this shows ASID + PC. To make use of this,
the PID->ASID relationship needs to be known. This is primarily for
debugging.)
*/
#if defined(CONFIG_SH64_PROC_ASIDS)
#include <linux/init.h>
#include <linux/proc_fs.h>
static int
asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
{
int len=0;
struct task_struct *p;
read_lock(&tasklist_lock);
for_each_process(p) {
int pid = p->pid;
struct mm_struct *mm;
if (!pid) continue;
mm = p->mm;
if (mm) {
unsigned long asid, context;
context = mm->context;
asid = (context & 0xff);
len += sprintf(buf+len, "%5d : %02lx\n", pid, asid);
} else {
len += sprintf(buf+len, "%5d : (none)\n", pid);
}
}
read_unlock(&tasklist_lock);
*eof = 1;
return len;
}
static int __init register_proc_asids(void)
{
create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
return 0;
}
__initcall(register_proc_asids);
#endif
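/*
 * Illustrative /proc/asids output (PIDs and ASIDs made up):
 *
 *       1 : 03
 *      42 : 1a
 *      57 : (none)
 *
 * One line per process: the PID, then the low 8 bits of its mm
 * context (its ASID), or "(none)" for kernel threads with no mm.
 */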

376
arch/sh64/kernel/ptrace.c Normal file

@@ -0,0 +1,376 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/ptrace.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
* Started from SH3/4 version:
* SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*
* Original x86 implementation:
* By Ross Biro 1/23/92
* edited by Linus Torvalds
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
/* This mask defines the bits of the SR which the user is not allowed to
change, which are everything except S, Q, M, PR, SZ, FR. */
#define SR_MASK (0xffff8cfd)
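/*
 * Worked example of how SR_MASK is applied on a PTRACE_POKEUSR write
 * to SR (see sys_ptrace below): the new value keeps only the
 * user-changeable bits and inherits everything else from the child:
 *
 *	data = (data & ~SR_MASK) | (cursr & SR_MASK);
 *
 * so an attempt to flip a privileged bit is silently dropped, while
 * S, Q, M, PR, SZ and FR pass through.
 */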
/*
 * Does not yet catch signals sent when the child dies
 * in exit.c or in signal.c.
*/
/*
* This routine will get a word from the user area in the process kernel stack.
*/
static inline int get_stack_long(struct task_struct *task, int offset)
{
unsigned char *stack;
stack = (unsigned char *)(task->thread.uregs);
stack += offset;
return (*((int *)stack));
}
static inline unsigned long
get_fpu_long(struct task_struct *task, unsigned long addr)
{
unsigned long tmp;
struct pt_regs *regs;
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
if (addr == offsetof(struct user_fpu_struct, fpscr)) {
tmp = FPSCR_INIT;
} else {
tmp = 0xffffffffUL; /* matches initial value in fpu.c */
}
return tmp;
}
if (last_task_used_math == task) {
grab_fpu();
fpsave(&task->thread.fpu.hard);
release_fpu();
		last_task_used_math = NULL;
regs->sr |= SR_FD;
}
tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
return tmp;
}
/*
* This routine will put a word into the user area in the process kernel stack.
*/
static inline int put_stack_long(struct task_struct *task, int offset,
unsigned long data)
{
unsigned char *stack;
stack = (unsigned char *)(task->thread.uregs);
stack += offset;
*(unsigned long *) stack = data;
return 0;
}
static inline int
put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
{
struct pt_regs *regs;
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
fpinit(&task->thread.fpu.hard);
set_stopped_child_used_math(task);
} else if (last_task_used_math == task) {
grab_fpu();
fpsave(&task->thread.fpu.hard);
release_fpu();
		last_task_used_math = NULL;
regs->sr |= SR_FD;
}
((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
return 0;
}
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
#define WPC_DBRMODE 0x0d104008
static int first_call = 1;
int ret;
lock_kernel();
if (first_call) {
/* Set WPC.DBRMODE to 0. This makes all debug events get
* delivered through RESVEC, i.e. into the handlers in entry.S.
* (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
* would normally be left set to 1, which makes debug events get
* delivered through DBRVEC, i.e. into the remote gdb's
* handlers. This prevents ptrace getting them, and confuses
* the remote gdb.) */
printk("DBRMODE set to 0 to permit native debugging\n");
poke_real_address_q(WPC_DBRMODE, 0);
first_call = 0;
}
ret = -EPERM;
if (request == PTRACE_TRACEME) {
/* are we already being traced? */
if (current->ptrace & PT_PTRACED)
goto out;
/* set the ptrace bit in the process flags. */
current->ptrace |= PT_PTRACED;
ret = 0;
goto out;
}
ret = -ESRCH;
read_lock(&tasklist_lock);
child = find_task_by_pid(pid);
if (child)
get_task_struct(child);
read_unlock(&tasklist_lock);
if (!child)
goto out;
ret = -EPERM;
if (pid == 1) /* you may not mess with init */
goto out_tsk;
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
goto out_tsk;
}
ret = ptrace_check_attach(child, request == PTRACE_KILL);
if (ret < 0)
goto out_tsk;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
unsigned long tmp;
int copied;
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
ret = -EIO;
if (copied != sizeof(tmp))
break;
ret = put_user(tmp,(unsigned long *) data);
break;
}
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || addr < 0)
break;
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
} else if (addr == offsetof(struct user, u_fpvalid)) {
tmp = !!tsk_used_math(child);
} else {
break;
}
ret = put_user(tmp, (unsigned long *)data);
break;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
ret = 0;
if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
break;
ret = -EIO;
break;
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area. We must
disallow any changes to certain SR bits or u_fpvalid, since
this could crash the kernel or result in a security
loophole. */
ret = -EIO;
if ((addr & 3) || addr < 0)
break;
if (addr < sizeof(struct pt_regs)) {
/* Ignore change of top 32 bits of SR */
if (addr == offsetof (struct pt_regs, sr)+4)
{
ret = 0;
break;
}
/* If lower 32 bits of SR, ignore non-user bits */
if (addr == offsetof (struct pt_regs, sr))
{
long cursr = get_stack_long(child, addr);
data &= ~(SR_MASK);
data |= (cursr & SR_MASK);
}
ret = put_stack_long(child, addr, data);
}
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
}
break;
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT: { /* restart after signal. */
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
if (request == PTRACE_SYSCALL)
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
wake_up_process(child);
ret = 0;
break;
}
/*
* make the child exit. Best I can do is send it a sigkill.
* perhaps it should be put in the status that it wants to
* exit.
*/
case PTRACE_KILL: {
ret = 0;
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
break;
child->exit_code = SIGKILL;
wake_up_process(child);
break;
}
case PTRACE_SINGLESTEP: { /* set the trap flag. */
struct pt_regs *regs;
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
if ((child->ptrace & PT_DTRACE) == 0) {
/* Spurious delayed TF traps may occur */
child->ptrace |= PT_DTRACE;
}
regs = child->thread.uregs;
regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
child->exit_code = data;
/* give it a chance to run. */
wake_up_process(child);
ret = 0;
break;
}
case PTRACE_DETACH: /* detach a process that was attached. */
ret = ptrace_detach(child, data);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
out_tsk:
put_task_struct(child);
out:
unlock_kernel();
return ret;
}
asmlinkage void syscall_trace(void)
{
struct task_struct *tsk = current;
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(tsk->ptrace & PT_PTRACED))
return;
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (tsk->exit_code) {
send_sig(tsk->exit_code, tsk, 1);
tsk->exit_code = 0;
}
}
/* Called with interrupts disabled */
asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
{
/* This is called after a single step exception (DEBUGSS).
There is no need to change the PC, as it is a post-execution
exception, as entry.S does not do anything to the PC for DEBUGSS.
We need to clear the Single Step setting in SR to avoid
continually stepping. */
local_irq_enable();
regs->sr &= ~SR_SSTEP;
force_sig(SIGTRAP, current);
}
/* Called with interrupts disabled */
asmlinkage void do_software_break_point(unsigned long long vec,
struct pt_regs *regs)
{
/* We need to forward step the PC, to counteract the backstep done
in signal.c. */
local_irq_enable();
force_sig(SIGTRAP, current);
regs->pc += 4;
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* nothing to do.. */
}


@@ -0,0 +1,140 @@
/*
 * Taken straight from the Alpha implementation;
 * it may not work well here.
*/
/*
* Generic semaphore code. Buyer beware. Do your own
* specific changes in <asm/semaphore-helper.h>
*/
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <asm/semaphore.h>
#include <asm/semaphore-helper.h>
spinlock_t semaphore_wake_lock = SPIN_LOCK_UNLOCKED;
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to sleep, while the "waking" variable is
* incremented when the "up()" code goes to wake up waiting
* processes.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
 * the increment operation).
*
* waking_non_zero() (from asm/semaphore.h) must execute
* atomically.
*
* When __up() is called, the count was negative before
* incrementing it, and we need to wake up somebody.
*
* This routine adds one to the count of processes that need to
* wake up and exit. ALL waiting processes actually wake up but
* only the one that gets to the "waking" field first will gate
* through and acquire the semaphore. The others will go back
* to sleep.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
void __up(struct semaphore *sem)
{
wake_one_more(sem);
wake_up(&sem->wait);
}
/*
* Perform the "down" function. Return zero for semaphore acquired,
* return negative for signalled out of the function.
*
* If called from __down, the return is ignored and the wait loop is
* not interruptible. This means that a task waiting on a semaphore
* using "down()" cannot be killed until someone does an "up()" on
* the semaphore.
*
* If called from __down_interruptible, the return value gets checked
* upon return. If the return value is negative then the task continues
* with the negative value in the return register (it can be tested by
* the caller).
*
* Either form may be used in conjunction with "up()".
*
*/
#define DOWN_VAR \
struct task_struct *tsk = current; \
wait_queue_t wait; \
init_waitqueue_entry(&wait, tsk);
#define DOWN_HEAD(task_state) \
\
\
tsk->state = (task_state); \
add_wait_queue(&sem->wait, &wait); \
\
/* \
* Ok, we're set up. sem->count is known to be less than zero \
* so we must wait. \
* \
* We can let go the lock for purposes of waiting. \
* We re-acquire it after awaking so as to protect \
* all semaphore operations. \
* \
* If "up()" is called before we call waking_non_zero() then \
* we will catch it right away. If it is called later then \
* we will have to go through a wakeup cycle to catch it. \
* \
* Multiple waiters contend for the semaphore lock to see \
* who gets to gate through and who has to wait some more. \
*/ \
for (;;) {
#define DOWN_TAIL(task_state) \
tsk->state = (task_state); \
} \
tsk->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __sched __down(struct semaphore * sem)
{
DOWN_VAR
DOWN_HEAD(TASK_UNINTERRUPTIBLE)
if (waking_non_zero(sem))
break;
schedule();
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __sched __down_interruptible(struct semaphore * sem)
{
int ret = 0;
DOWN_VAR
DOWN_HEAD(TASK_INTERRUPTIBLE)
ret = waking_non_zero_interruptible(sem, tsk);
if (ret)
{
if (ret == 1)
/* ret != 0 only if we get interrupted -arca */
ret = 0;
break;
}
schedule();
DOWN_TAIL(TASK_INTERRUPTIBLE)
return ret;
}
int __down_trylock(struct semaphore * sem)
{
return waking_non_zero_trylock(sem);
}
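/*
 * Usage sketch (illustrative only): the slow paths above are entered
 * from the inline fast paths in <asm/semaphore.h> only on contention.
 */
#if 0
static DECLARE_MUTEX(example_sem);	/* counting semaphore, count == 1 */

static void example(void)
{
	down(&example_sem);	/* calls __down() only if already held */
	/* ... critical section ... */
	up(&example_sem);	/* calls __up() only if someone is waiting */
}
#endif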

385
arch/sh64/kernel/setup.c Normal file

@@ -0,0 +1,385 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/setup.c
*
* sh64 Arch Support
*
* This file handles the architecture-dependent parts of initialization
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*
* benedict.gaster@superh.com: 2nd May 2002
* Modified to use the empty_zero_page to pass command line arguments.
*
* benedict.gaster@superh.com: 3rd May 2002
* Added support for ramdisk, removing statically linked romfs at the same time.
*
* lethal@linux-sh.org: 15th May 2003
* Added generic procfs cpuinfo reporting. Make boards just export their name.
*
* lethal@linux-sh.org: 25th May 2003
* Added generic get_cpu_subtype() for subtype reporting from cpu_data->type.
*
*/
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/platform.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#ifdef CONFIG_VT
#include <linux/console.h>
#endif
struct screen_info screen_info;
#ifdef CONFIG_BLK_DEV_RAM
extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
extern int rd_image_start; /* starting block # of image */
#endif
extern int root_mountflags;
extern char *get_system_type(void);
extern void platform_setup(void);
extern void platform_monitor(void);
extern void platform_reserve(void);
extern int sh64_cache_init(void);
extern int sh64_tlb_init(void);
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
static char command_line[COMMAND_LINE_SIZE] = { 0, };
unsigned long long memory_start = CONFIG_MEMORY_START;
unsigned long long memory_end = CONFIG_MEMORY_START + (CONFIG_MEMORY_SIZE_IN_MB * 1024 * 1024);
struct sh_cpuinfo boot_cpu_data;
static inline void parse_mem_cmdline (char ** cmdline_p)
{
char c = ' ', *to = command_line, *from = COMMAND_LINE;
int len = 0;
/* Save unparsed command line copy for /proc/cmdline */
memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
for (;;) {
/*
* "mem=XXX[kKmM]" defines a size of memory.
*/
if (c == ' ' && !memcmp(from, "mem=", 4)) {
if (to != command_line)
to--;
{
unsigned long mem_size;
mem_size = memparse(from+4, &from);
memory_end = memory_start + mem_size;
}
}
c = *(from++);
if (!c)
break;
if (COMMAND_LINE_SIZE <= ++len)
break;
*(to++) = c;
}
*to = '\0';
*cmdline_p = command_line;
}
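/*
 * Example: booting with "console=ttySC0 mem=64M" keeps the full
 * string in saved_command_line (for /proc/cmdline), leaves just
 * "console=ttySC0" in command_line, and sets
 * memory_end = memory_start + 64*1024*1024; memparse() accepts the
 * usual K/M size suffixes.
 */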
static void __init sh64_cpu_type_detect(void)
{
extern unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long cir;
/* Do peeks in real mode to avoid having to set up a mapping for the
WPC registers. On SH5-101 cut2, such a mapping would be exposed to
an address translation erratum which would make it hard to set up
correctly. */
cir = peek_real_address_q(0x0d000008);
if ((cir & 0xffff) == 0x5103) {
boot_cpu_data.type = CPU_SH5_103;
} else if (((cir >> 32) & 0xffff) == 0x51e2) {
/* CPU.VCR aliased at CIR address on SH5-101 */
boot_cpu_data.type = CPU_SH5_101;
} else {
boot_cpu_data.type = CPU_SH_NONE;
}
}
void __init setup_arch(char **cmdline_p)
{
unsigned long bootmap_size, i;
unsigned long first_pfn, start_pfn, last_pfn, pages;
#ifdef CONFIG_EARLY_PRINTK
extern void enable_early_printk(void);
/*
* Setup Early SCIF console
*/
enable_early_printk();
#endif
/*
* Setup TLB mappings
*/
sh64_tlb_init();
/*
* Caches are already initialized by the time we get here, so we just
* fill in cpu_data info for the caches.
*/
sh64_cache_init();
platform_setup();
platform_monitor();
sh64_cpu_type_detect();
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
if (!MOUNT_ROOT_RDONLY)
root_mountflags &= ~MS_RDONLY;
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) _end;
code_resource.start = __pa(_text);
code_resource.end = __pa(_etext)-1;
data_resource.start = __pa(_etext);
data_resource.end = __pa(_edata)-1;
parse_mem_cmdline(cmdline_p);
/*
* Find the lowest and highest page frame numbers we have available
*/
first_pfn = PFN_DOWN(memory_start);
last_pfn = PFN_DOWN(memory_end);
pages = last_pfn - first_pfn;
/*
* Partially used pages are not usable - thus
* we are rounding upwards:
*/
start_pfn = PFN_UP(__pa(_end));
/*
* Find a proper area for the bootmem bitmap. After this
* bootstrap step all allocations (until the page allocator
* is intact) must be done via bootmem_alloc().
*/
bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
first_pfn,
last_pfn);
/*
* Round it up.
*/
bootmap_size = PFN_PHYS(PFN_UP(bootmap_size));
/*
* Register fully available RAM pages with the bootmem allocator.
*/
free_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn), PFN_PHYS(pages));
/*
* Reserve all kernel sections + bootmem bitmap + a guard page.
*/
reserve_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn),
(PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE) - PFN_PHYS(first_pfn));
/*
* Reserve platform dependent sections
*/
platform_reserve();
#ifdef CONFIG_BLK_DEV_INITRD
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) {
reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE);
initrd_start =
(long) INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
initrd_end = initrd_start + INITRD_SIZE;
} else {
printk("initrd extends beyond end of memory "
"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
(long) INITRD_START + INITRD_SIZE,
PFN_PHYS(last_pfn));
initrd_start = 0;
}
}
#endif
/*
* Claim all RAM, ROM, and I/O resources.
*/
/* Kernel RAM */
request_resource(&iomem_resource, &code_resource);
request_resource(&iomem_resource, &data_resource);
/* Other KRAM space */
for (i = 0; i < STANDARD_KRAM_RESOURCES - 2; i++)
request_resource(&iomem_resource,
&platform_parms.kram_res_p[i]);
/* XRAM space */
for (i = 0; i < STANDARD_XRAM_RESOURCES; i++)
request_resource(&iomem_resource,
&platform_parms.xram_res_p[i]);
/* ROM space */
for (i = 0; i < STANDARD_ROM_RESOURCES; i++)
request_resource(&iomem_resource,
&platform_parms.rom_res_p[i]);
/* I/O space */
for (i = 0; i < STANDARD_IO_RESOURCES; i++)
request_resource(&ioport_resource,
&platform_parms.io_res_p[i]);
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
#endif
#endif
printk("Hardware FPU: %s\n", fpu_in_use ? "enabled" : "disabled");
paging_init();
}
void __xchg_called_with_bad_pointer(void)
{
printk(KERN_EMERG "xchg() called with bad pointer !\n");
}
static struct cpu cpu[1];
static int __init topology_init(void)
{
return register_cpu(cpu, 0, NULL);
}
subsys_initcall(topology_init);
/*
* Get CPU information
*/
static const char *cpu_name[] = {
[CPU_SH5_101] = "SH5-101",
[CPU_SH5_103] = "SH5-103",
[CPU_SH_NONE] = "Unknown",
};
const char *get_cpu_subtype(void)
{
return cpu_name[boot_cpu_data.type];
}
#ifdef CONFIG_PROC_FS
static int show_cpuinfo(struct seq_file *m,void *v)
{
unsigned int cpu = smp_processor_id();
if (!cpu)
seq_printf(m, "machine\t\t: %s\n", get_system_type());
seq_printf(m, "processor\t: %d\n", cpu);
seq_printf(m, "cpu family\t: SH-5\n");
seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype());
seq_printf(m, "icache size\t: %dK-bytes\n",
(boot_cpu_data.icache.ways *
boot_cpu_data.icache.sets *
boot_cpu_data.icache.linesz) >> 10);
seq_printf(m, "dcache size\t: %dK-bytes\n",
(boot_cpu_data.dcache.ways *
boot_cpu_data.dcache.sets *
boot_cpu_data.dcache.linesz) >> 10);
seq_printf(m, "itlb entries\t: %d\n", boot_cpu_data.itlb.entries);
seq_printf(m, "dtlb entries\t: %d\n", boot_cpu_data.dtlb.entries);
#define PRINT_CLOCK(name, value) \
seq_printf(m, name " clock\t: %d.%02dMHz\n", \
((value) / 1000000), ((value) % 1000000)/10000)
PRINT_CLOCK("cpu", boot_cpu_data.cpu_clock);
PRINT_CLOCK("bus", boot_cpu_data.bus_clock);
PRINT_CLOCK("module", boot_cpu_data.module_clock);
seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
(loops_per_jiffy*HZ+2500)/500000,
((loops_per_jiffy*HZ+2500)/5000) % 100);
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
return (void*)(*pos == 0);
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */
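/*
 * Illustrative /proc/cpuinfo output from show_cpuinfo() (board name
 * and all numbers made up):
 *
 *   machine		: Cayman
 *   processor	: 0
 *   cpu family	: SH-5
 *   cpu type	: SH5-101
 *   icache size	: 32K-bytes
 *   dcache size	: 32K-bytes
 *   itlb entries	: 64
 *   dtlb entries	: 64
 *   cpu clock	: 400.00MHz
 *   bus clock	: 100.00MHz
 *   module clock	: 50.00MHz
 *   bogomips	: 796.67
 */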


@@ -0,0 +1,89 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/sh_ksyms.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#include <linux/config.h>
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/tty.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
#if 0
/* Not yet - there's no declaration of drive_info anywhere. */
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
extern struct drive_info_struct drive_info;
EXPORT_SYMBOL(drive_info);
#endif
#endif
/* platform dependent support */
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
#ifdef CONFIG_VT
EXPORT_SYMBOL(screen_info);
#endif
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(__put_user_asm_l);
EXPORT_SYMBOL(__get_user_asm_l);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(flush_dcache_page);
/* For ext3 */
EXPORT_SYMBOL(sh64_page_clear);
/* Ugh. These come in from libgcc.a at link time. */
extern void __sdivsi3(void);
extern void __muldi3(void);
extern void __udivsi3(void);
extern char __div_table;
EXPORT_SYMBOL(__sdivsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__div_table);

727
arch/sh64/kernel/signal.c Normal file

@@ -0,0 +1,727 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/signal.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* Started from sh version.
*
*/
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/personality.h>
#include <linux/suspend.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#define REG_RET 9
#define REG_ARG1 2
#define REG_ARG2 3
#define REG_ARG3 4
#define REG_SP 15
#define REG_PR 18
#define REF_REG_RET regs->regs[REG_RET]
#define REF_REG_SP regs->regs[REG_SP]
#define DEREF_REG_PR regs->regs[REG_PR]
#define DEBUG_SIG 0
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int
sys_sigsuspend(old_sigset_t mask,
unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs * regs)
{
sigset_t saveset;
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
REF_REG_RET = -EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
regs->pc += 4; /* because sys_sigreturn decrements the pc */
if (do_signal(regs, &saveset)) {
/* pc now points at signal handler. Need to decrement
it because entry.S will increment it. */
regs->pc -= 4;
return -EINTR;
}
}
}
asmlinkage int
sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
unsigned long r4, unsigned long r5, unsigned long r6,
unsigned long r7,
struct pt_regs * regs)
{
sigset_t saveset, newset;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
REF_REG_RET = -EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
regs->pc += 4; /* because sys_sigreturn decrements the pc */
if (do_signal(regs, &saveset)) {
/* pc now points at signal handler. Need to decrement
it because entry.S will increment it. */
regs->pc -= 4;
return -EINTR;
}
}
}
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
return -EFAULT;
__get_user(new_ka.sa.sa_flags, &act->sa_flags);
__get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
return -EFAULT;
__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
}
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
unsigned long r4, unsigned long r5, unsigned long r6,
unsigned long r7,
struct pt_regs * regs)
{
return do_sigaltstack(uss, uoss, REF_REG_SP);
}
/*
* Do a signal return; undo the signal stack.
*/
struct sigframe
{
struct sigcontext sc;
unsigned long extramask[_NSIG_WORDS-1];
long long retcode[2];
};
struct rt_sigframe
{
struct siginfo __user *pinfo;
void *puc;
struct siginfo info;
struct ucontext uc;
long long retcode[2];
};
#ifdef CONFIG_SH_FPU
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int fpvalid;
err |= __get_user (fpvalid, &sc->sc_fpvalid);
conditional_used_math(fpvalid);
if (! fpvalid)
return err;
if (current == last_task_used_math) {
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
(sizeof(long long) * 32) + (sizeof(int) * 1));
return err;
}
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int fpvalid;
fpvalid = !!used_math();
err |= __put_user(fpvalid, &sc->sc_fpvalid);
if (! fpvalid)
return err;
if (current == last_task_used_math) {
grab_fpu();
fpsave(&current->thread.fpu.hard);
release_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
(sizeof(long long) * 32) + (sizeof(int) * 1));
clear_used_math();
return err;
}
#else
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{ return 0; }
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{ return 0; }
#endif
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
{
unsigned int err = 0;
unsigned long long current_sr, new_sr;
#define SR_MASK 0xffff8cfd
#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
/* Prevent the signal handler manipulating SR in a way that can
crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
modified */
current_sr = regs->sr;
err |= __get_user(new_sr, &sc->sc_sr);
regs->sr &= SR_MASK;
regs->sr |= (new_sr & ~SR_MASK);
COPY(pc);
#undef COPY
/* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
* has been restored above.) */
err |= restore_sigcontext_fpu(regs, sc);
regs->syscall_nr = -1; /* disable syscall checks */
err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
return err;
}
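The COPY() macro above leans on ## token pasting so that one field list maps regs->regs[n] onto sc->sc_regs[n]. A small user-space sketch of the pattern (illustrative only; the struct and function names here are made up):

/* User-space sketch of the COPY() token-pasting pattern. */
#include <stdio.h>

struct regs_t { long regs[4]; long pc; };
struct sc_t { long sc_regs[4]; long sc_pc; };

#define COPY(x) (dst->x = src->sc_##x)

static void restore(struct regs_t *dst, const struct sc_t *src)
{
	COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
	COPY(pc); /* expands to dst->pc = src->sc_pc */
}

int main(void)
{
	struct sc_t sc = { { 1, 2, 3, 4 }, 0x1000 };
	struct regs_t r;

	restore(&r, &sc);
	printf("pc=%#lx r0=%ld\n", r.pc, r.regs[0]);
	return 0;
}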
asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs * regs)
{
struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
sigset_t set;
long long ret;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_NSIG_WORDS > 1
&& __copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->sc, &ret))
goto badframe;
regs->pc -= 4;
return (int) ret;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs * regs)
{
struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
sigset_t set;
stack_t __user st;
long long ret;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
goto badframe;
regs->pc -= 4;
if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
do_sigaltstack(&st, NULL, REF_REG_SP);
return (int) ret;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
/*
* Set up a signal frame.
*/
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask)
{
int err = 0;
/* Do this first; otherwise, if this sets sr->fd, that value isn't preserved. */
err |= setup_sigcontext_fpu(regs, sc);
#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
COPY(sr); COPY(pc);
#undef COPY
err |= __put_user(mask, &sc->oldmask);
return err;
}
/*
* Determine which stack to use..
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & -8ul);
}
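The final `& -8ul` rounds the computed frame base down to an 8-byte boundary. A user-space sketch of the arithmetic (the stack pointer and frame size below are made-up values):

/* Illustrative only: how (sp - frame_size) & -8ul rounds down. */
#include <stdio.h>

int main(void)
{
	unsigned long sp = 0x7fff1237ul; /* made-up stack pointer */
	unsigned long frame_size = 100; /* made-up sizeof(struct sigframe) */
	unsigned long base = (sp - frame_size) & -8ul;

	printf("frame base = %#lx\n", base); /* 0x7fff11d0, 8-byte aligned */
	return 0;
}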
void sa_default_restorer(void); /* See comments below */
void sa_default_rt_restorer(void); /* See comments below */
static void setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs *regs)
{
struct sigframe __user *frame;
int err = 0;
int signal;
frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
signal = current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
/* Give up early, as i386 does, in case of error */
if (err)
goto give_sigsegv;
if (_NSIG_WORDS > 1) {
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask)); }
/* Give up early, as i386 does, in case of error */
if (err)
goto give_sigsegv;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
/*
* On SH5 all edited pointers are subject to NEFF
*/
DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
} else {
/*
* Different approach on SH5.
* . Endianness independent asm code gets placed in entry.S .
* This is limited to four ASM instructions corresponding
* to two long longs in size.
* . err checking is done on the else branch only
* . flush_icache_range() is called upon __put_user() only
* . all edited pointers are subject to NEFF
* . being code, linker turns ShMedia bit on, always
* dereference index -1.
*/
DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
if (__copy_to_user(frame->retcode,
	(void *)((unsigned long)sa_default_restorer & ~1UL), 16) != 0)
goto give_sigsegv;
/* Cohere the trampoline with the I-cache. */
flush_cache_sigtramp(DEREF_REG_PR-1, DEREF_REG_PR-1+16);
}
/*
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
regs->regs[REG_SP] = (unsigned long) frame;
regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
(regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
/* FIXME:
The glibc profiling support for SH-5 needs to be passed a sigcontext
so it can retrieve the PC. At some point during 2003 the glibc
support was changed to receive the sigcontext through the 2nd
argument, but there are still versions of libc.so in use that use
the 3rd argument. Until libc.so is stabilised, pass the sigcontext
through both 2nd and 3rd arguments.
*/
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
set_fs(USER_DS);
#if DEBUG_SIG
/* Broken %016Lx */
printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
signal,
current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
#endif
return;
give_sigsegv:
force_sigsegv(sig, current);
}
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int err = 0;
int signal;
frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
signal = current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, info);
/* Give up early, as i386 does, in case of error */
if (err)
goto give_sigsegv;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user((void *)current->sas_ss_sp,
&frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Give up early, as i386 does, in case of error */
if (err)
goto give_sigsegv;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
/*
* On SH5 all edited pointers are subject to NEFF
*/
DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
} else {
/*
* Different approach on SH5.
* . Endianness independent asm code gets placed in entry.S .
* This is limited to four ASM instructions corresponding
* to two long longs in size.
* . err checking is done on the else branch only
* . flush_icache_range() is called upon __put_user() only
* . all edited pointers are subject to NEFF
* . being code, linker turns ShMedia bit on, always
* dereference index -1.
*/
DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
if (__copy_to_user(frame->retcode,
	(void *)((unsigned long)sa_default_rt_restorer & ~1UL), 16) != 0)
goto give_sigsegv;
flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
}
/*
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
regs->regs[REG_SP] = (unsigned long) frame;
regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
(regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
set_fs(USER_DS);
#if DEBUG_SIG
/* Broken %016Lx */
printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
signal,
current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
#endif
return;
give_sigsegv:
force_sigsegv(sig, current);
}
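The recurring `(p & NEFF_SIGN) ? (p | NEFF_MASK) : p` idiom in both frame setup paths sign-extends an effective address into SH-5's neff-bit canonical form before it is placed in a register. A user-space sketch; the mask values below are assumptions for illustration, not the real definitions, which depend on the number of implemented effective-address bits:

/* Sketch of the NEFF sign-extension idiom; mask values are assumed. */
#include <stdio.h>

#define NEFF_SIGN (1ull << 31) /* assumed: top bit of a 32-bit EA */
#define NEFF_MASK (~0ull << 32) /* assumed: bits above the EA width */

static unsigned long long neff_extend(unsigned long long p)
{
	return (p & NEFF_SIGN) ? (p | NEFF_MASK) : p;
}

int main(void)
{
	printf("%#llx\n", neff_extend(0x80001000ull)); /* 0xffffffff80001000 */
	printf("%#llx\n", neff_extend(0x00401000ull)); /* unchanged */
	return 0;
}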
/*
* OK, we're invoking a handler
*/
static void
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
sigset_t *oldset, struct pt_regs * regs)
{
/* Are we from a system call? */
if (regs->syscall_nr >= 0) {
/* If so, check system call restarting.. */
switch (regs->regs[REG_RET]) {
case -ERESTARTNOHAND:
regs->regs[REG_RET] = -EINTR;
break;
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->regs[REG_RET] = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
/* Decode syscall # */
regs->regs[REG_RET] = regs->syscall_nr;
regs->pc -= 4;
}
}
/* Set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
setup_rt_frame(sig, ka, info, oldset, regs);
else
setup_frame(sig, ka, oldset, regs);
if (!(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*
* Note that we go through the signals twice: once to check the signals that
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
/*
* We want the common case to go fast, which
* is why we may in certain cases get here from
* kernel mode. Just return without doing anything
* if so.
*/
if (!user_mode(regs))
return 1;
if (try_to_freeze(0))
goto no_signal;
if (!oldset)
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, 0);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
handle_signal(signr, &info, &ka, oldset, regs);
return 1;
}
no_signal:
/* Did we come from a system call? */
if (regs->syscall_nr >= 0) {
/* Restart the system call - no handlers present */
if (regs->regs[REG_RET] == -ERESTARTNOHAND ||
regs->regs[REG_RET] == -ERESTARTSYS ||
regs->regs[REG_RET] == -ERESTARTNOINTR) {
/* Decode Syscall # */
regs->regs[REG_RET] = regs->syscall_nr;
regs->pc -= 4;
}
}
return 0;
}

198
arch/sh64/kernel/switchto.S Normal file

@@ -0,0 +1,198 @@
/*
* arch/sh64/kernel/switchto.S
*
* sh64 context switch
*
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
.section .text..SHmedia32,"ax"
.little
.balign 32
.type sh64_switch_to,@function
.global sh64_switch_to
.global __sh64_switch_to_end
sh64_switch_to:
/* Incoming args
r2 - prev
r3 - &prev->thread
r4 - next
r5 - &next->thread
Outgoing results
r2 - last (=prev) : this just stays in r2 throughout
Want to create a full (struct pt_regs) on the stack to allow backtracing
functions to work. However, we only need to populate the callee-save
register slots in this structure; since we're a function our ancestors must
have themselves preserved all caller saved state in the stack. This saves
some wasted effort since we won't need to look at the values.
In particular, all caller-save registers are immediately available for
scratch use.
*/
#define FRAME_SIZE (76*8 + 8)
movi FRAME_SIZE, r0
sub.l r15, r0, r15
! Do normal-style register save to support backtrace
st.l r15, 0, r18 ! save link reg
st.l r15, 4, r14 ! save fp
add.l r15, r63, r14 ! setup frame pointer
! hopefully this looks normal to the backtrace now.
addi.l r15, 8, r1 ! base of pt_regs
addi.l r1, 24, r0 ! base of pt_regs.regs
addi.l r0, (63*8), r8 ! base of pt_regs.trregs
/* Note : to be fixed?
struct pt_regs is really designed for holding the state on entry
to an exception, i.e. pc,sr,regs etc. However, for the context
switch state, some of this is not required. But the unwinder takes
struct pt_regs * as an arg so we have to build this structure
to allow unwinding switched tasks in show_state() */
st.q r0, ( 9*8), r9
st.q r0, (10*8), r10
st.q r0, (11*8), r11
st.q r0, (12*8), r12
st.q r0, (13*8), r13
st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
! the point where the process is left in suspended animation, i.e. current
! fp here, not the saved one.
st.q r0, (16*8), r16
st.q r0, (24*8), r24
st.q r0, (25*8), r25
st.q r0, (26*8), r26
st.q r0, (27*8), r27
st.q r0, (28*8), r28
st.q r0, (29*8), r29
st.q r0, (30*8), r30
st.q r0, (31*8), r31
st.q r0, (32*8), r32
st.q r0, (33*8), r33
st.q r0, (34*8), r34
st.q r0, (35*8), r35
st.q r0, (44*8), r44
st.q r0, (45*8), r45
st.q r0, (46*8), r46
st.q r0, (47*8), r47
st.q r0, (48*8), r48
st.q r0, (49*8), r49
st.q r0, (50*8), r50
st.q r0, (51*8), r51
st.q r0, (52*8), r52
st.q r0, (53*8), r53
st.q r0, (54*8), r54
st.q r0, (55*8), r55
st.q r0, (56*8), r56
st.q r0, (57*8), r57
st.q r0, (58*8), r58
st.q r0, (59*8), r59
! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
! Use a local label to avoid creating a symbol that will confuse the
! backtrace
pta .Lsave_pc, tr0
gettr tr5, r45
gettr tr6, r46
gettr tr7, r47
st.q r8, (5*8), r45
st.q r8, (6*8), r46
st.q r8, (7*8), r47
! Now switch context
gettr tr0, r9
st.l r3, 0, r15 ! prev->thread.sp
st.l r3, 8, r1 ! prev->thread.kregs
st.l r3, 4, r9 ! prev->thread.pc
st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
! Load PC for next task (init value or save_pc later)
ld.l r5, 4, r18 ! next->thread.pc
! Switch stacks
ld.l r5, 0, r15 ! next->thread.sp
ptabs r18, tr0
! Update current
ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
putcon r9, kcr0 ! current = next->thread_info
! go to save_pc for a reschedule, or the initial thread.pc for a new process
blink tr0, r63
! Restore (when we come back to a previously saved task)
.Lsave_pc:
addi.l r15, 32, r0 ! r0 = next's regs
addi.l r0, (63*8), r8 ! r8 = next's tr_regs
ld.q r8, (5*8), r45
ld.q r8, (6*8), r46
ld.q r8, (7*8), r47
ptabs r45, tr5
ptabs r46, tr6
ptabs r47, tr7
ld.q r0, ( 9*8), r9
ld.q r0, (10*8), r10
ld.q r0, (11*8), r11
ld.q r0, (12*8), r12
ld.q r0, (13*8), r13
ld.q r0, (14*8), r14
ld.q r0, (16*8), r16
ld.q r0, (24*8), r24
ld.q r0, (25*8), r25
ld.q r0, (26*8), r26
ld.q r0, (27*8), r27
ld.q r0, (28*8), r28
ld.q r0, (29*8), r29
ld.q r0, (30*8), r30
ld.q r0, (31*8), r31
ld.q r0, (32*8), r32
ld.q r0, (33*8), r33
ld.q r0, (34*8), r34
ld.q r0, (35*8), r35
ld.q r0, (44*8), r44
ld.q r0, (45*8), r45
ld.q r0, (46*8), r46
ld.q r0, (47*8), r47
ld.q r0, (48*8), r48
ld.q r0, (49*8), r49
ld.q r0, (50*8), r50
ld.q r0, (51*8), r51
ld.q r0, (52*8), r52
ld.q r0, (53*8), r53
ld.q r0, (54*8), r54
ld.q r0, (55*8), r55
ld.q r0, (56*8), r56
ld.q r0, (57*8), r57
ld.q r0, (58*8), r58
ld.q r0, (59*8), r59
! epilogue
ld.l r15, 0, r18
ld.l r15, 4, r14
ptabs r18, tr0
movi FRAME_SIZE, r0
add r15, r0, r15
blink tr0, r63
__sh64_switch_to_end:
.LFE1:
.size sh64_switch_to,.LFE1-sh64_switch_to

300
arch/sh64/kernel/sys_sh64.c Normal file

@@ -0,0 +1,300 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/sys_sh64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/SH5
* platform.
*
* Mostly taken from i386 version.
*
*/
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
#include <asm/ptrace.h>
#define REG_3 3
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
*/
#ifdef NEW_PIPE_IMPLEMENTATION
asmlinkage int sys_pipe(unsigned long * fildes,
unsigned long dummy_r3,
unsigned long dummy_r4,
unsigned long dummy_r5,
unsigned long dummy_r6,
unsigned long dummy_r7,
struct pt_regs * regs) /* r8 = pt_regs forced by entry.S */
{
int fd[2];
int ret;
ret = do_pipe(fd);
if (ret == 0)
/*
***********************************************************************
* To avoid the copy_to_user we prefer to break the ABIs convention, *
* packing the valid pair of file IDs into a single register (r3); *
* while r2 is the return code as defined by the sh5-ABIs. *
* BE CAREFUL: pipe stub, into glibc, must be aware of this solution *
***********************************************************************
#ifdef __LITTLE_ENDIAN__
regs->regs[REG_3] = (((unsigned long long) fd[1]) << 32) | ((unsigned long long) fd[0]);
#else
regs->regs[REG_3] = (((unsigned long long) fd[0]) << 32) | ((unsigned long long) fd[1]);
#endif
*/
/* although not very clever, this is endianness-independent */
regs->regs[REG_3] = (unsigned long long) *((unsigned long long *) fd);
return ret;
}
#else
asmlinkage int sys_pipe(unsigned long * fildes)
{
int fd[2];
int error;
error = do_pipe(fd);
if (!error) {
if (copy_to_user(fildes, fd, 2*sizeof(int)))
error = -EFAULT;
}
return error;
}
#endif
/*
* To avoid cache aliasing, we map the shared page with the same colour.
*/
#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct vm_area_struct *vma;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
return -EINVAL;
return addr;
}
if (len > TASK_SIZE)
return -ENOMEM;
if (!addr)
addr = TASK_UNMAPPED_BASE;
if (flags & MAP_PRIVATE)
addr = PAGE_ALIGN(addr);
else
addr = COLOUR_ALIGN(addr);
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
if (!vma || addr + len <= vma->vm_start)
return addr;
addr = vma->vm_end;
if (!(flags & MAP_PRIVATE))
addr = COLOUR_ALIGN(addr);
}
}
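COLOUR_ALIGN() above rounds an address up to the next SHMLBA boundary so that shared mappings always start on the same cache colour. A user-space sketch of the macro; the SHMLBA value below is an assumption for illustration (the real one is CPU-specific):

/* Sketch of COLOUR_ALIGN(); SHMLBA is assumed here. */
#include <stdio.h>

#define SHMLBA 0x4000 /* assumed alias period: 16 KiB */
#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))

int main(void)
{
	unsigned long addr = 0x40005000ul;

	/* rounds up to the next 16 KiB boundary: 0x40008000 */
	printf("%#lx -> %#lx\n", addr, COLOUR_ALIGN(addr));
	return 0;
}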
/* common code for old and new mmaps */
static inline long do_mmap2(
unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
int error = -EBADF;
struct file * file = NULL;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
file = fget(fd);
if (!file)
goto out;
}
down_write(&current->mm->mmap_sem);
error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
up_write(&current->mm->mmap_sem);
if (file)
fput(file);
out:
return error;
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
int fd, unsigned long off)
{
if (off & ~PAGE_MASK)
return -EINVAL;
return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
}
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
* This is really horribly ugly.
*/
asmlinkage int sys_ipc(uint call, int first, int second,
int third, void __user *ptr, long fifth)
{
int version, ret;
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
if (call <= SEMCTL)
switch (call) {
case SEMOP:
return sys_semtimedop(first, (struct sembuf __user *)ptr,
second, NULL);
case SEMTIMEDOP:
return sys_semtimedop(first, (struct sembuf __user *)ptr,
second,
(const struct timespec __user *)fifth);
case SEMGET:
return sys_semget (first, second, third);
case SEMCTL: {
union semun fourth;
if (!ptr)
return -EINVAL;
if (get_user(fourth.__pad, (void * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
default:
return -EINVAL;
}
if (call <= MSGCTL)
switch (call) {
case MSGSND:
return sys_msgsnd (first, (struct msgbuf __user *) ptr,
second, third);
case MSGRCV:
switch (version) {
case 0: {
struct ipc_kludge tmp;
if (!ptr)
return -EINVAL;
if (copy_from_user(&tmp,
(struct ipc_kludge __user *) ptr,
sizeof (tmp)))
return -EFAULT;
return sys_msgrcv (first, tmp.msgp, second,
tmp.msgtyp, third);
}
default:
return sys_msgrcv (first,
(struct msgbuf __user *) ptr,
second, fifth, third);
}
case MSGGET:
return sys_msgget ((key_t) first, second);
case MSGCTL:
return sys_msgctl (first, second,
(struct msqid_ds __user *) ptr);
default:
return -EINVAL;
}
if (call <= SHMCTL)
switch (call) {
case SHMAT:
switch (version) {
default: {
ulong raddr;
ret = do_shmat (first, (char __user *) ptr,
second, &raddr);
if (ret)
return ret;
return put_user (raddr, (ulong __user *) third);
}
case 1: /* iBCS2 emulator entry point */
if (!segment_eq(get_fs(), get_ds()))
return -EINVAL;
return do_shmat (first, (char __user *) ptr,
second, (ulong *) third);
}
case SHMDT:
return sys_shmdt ((char __user *)ptr);
case SHMGET:
return sys_shmget (first, second, third);
case SHMCTL:
return sys_shmctl (first, second,
(struct shmid_ds __user *) ptr);
default:
return -EINVAL;
}
return -EINVAL;
}
asmlinkage int sys_uname(struct old_utsname * name)
{
int err;
if (!name)
return -EFAULT;
down_read(&uts_sem);
err = copy_to_user(name, &system_utsname, sizeof (*name));
up_read(&uts_sem);
return err ? -EFAULT : 0;
}
/* Copy from mips version */
asmlinkage long sys_shmatcall(int shmid, char __user *shmaddr,
int shmflg)
{
unsigned long raddr;
int err;
err = do_shmat(shmid, shmaddr, shmflg, &raddr);
if (err)
return err;
err = raddr;
return err;
}

345
arch/sh64/kernel/syscalls.S Normal file

@@ -0,0 +1,345 @@
/*
* arch/sh64/kernel/syscalls.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sys.h>
.section .data, "aw"
.balign 32
/*
* System calls jump table
*/
.globl sys_call_table
sys_call_table:
.long sys_ni_syscall /* 0 - old "setup()" system call */
.long sys_exit
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_lchown16
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
.long sys_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid16
.long sys_getgid16
.long sys_signal
.long sys_geteuid16
.long sys_getegid16 /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall /* sys_olduname */
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_ni_syscall /* sys_oldselect */
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm */
.long sys_socketcall /* Obsolete implementation of socket syscall */
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_uname
.long sys_ni_syscall /* 110 */ /* iopl */
.long sys_vhangup
.long sys_ni_syscall /* idle */
.long sys_ni_syscall /* vm86old */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc /* Obsolete ipc syscall implementation */
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_ni_syscall /* sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130: old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_ni_syscall /* vm86 */
.long sys_ni_syscall /* old "query_module" */
.long sys_poll
.long sys_nfsservctl
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
.long sys_chown16
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
.long sys_ni_syscall /* streams1 */
.long sys_ni_syscall /* streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_lchown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_chown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_mincore
.long sys_madvise
/* Broken-out socket family (maintain backwards compatibility in syscall
numbering with 2.4) */
.long sys_socket /* 220 */
.long sys_bind
.long sys_connect
.long sys_listen
.long sys_accept
.long sys_getsockname /* 225 */
.long sys_getpeername
.long sys_socketpair
.long sys_send
.long sys_sendto
.long sys_recv /* 230 */
.long sys_recvfrom
.long sys_shutdown
.long sys_setsockopt
.long sys_getsockopt
.long sys_sendmsg /* 235 */
.long sys_recvmsg
/* Broken-out IPC family (maintain backwards compatibility in syscall
numbering with 2.4) */
.long sys_semop
.long sys_semget
.long sys_semctl
.long sys_msgsnd /* 240 */
.long sys_msgrcv
.long sys_msgget
.long sys_msgctl
.long sys_shmatcall
.long sys_shmdt /* 245 */
.long sys_shmget
.long sys_shmctl
/* Rest of syscalls listed in 2.4 i386 unistd.h */
.long sys_getdents64
.long sys_fcntl64
.long sys_ni_syscall /* 250 reserved for TUX */
.long sys_ni_syscall /* Reserved for Security */
.long sys_gettid
.long sys_readahead
.long sys_setxattr
.long sys_lsetxattr /* 255 */
.long sys_fsetxattr
.long sys_getxattr
.long sys_lgetxattr
.long sys_fgetxattr
.long sys_listxattr /* 260 */
.long sys_llistxattr
.long sys_flistxattr
.long sys_removexattr
.long sys_lremovexattr
.long sys_fremovexattr /* 265 */
.long sys_tkill
.long sys_sendfile64
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity /* 270 */
.long sys_ni_syscall
.long sys_ni_syscall
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents /* 275 */
.long sys_io_submit
.long sys_io_cancel
.long sys_fadvise64
.long sys_ni_syscall
.long sys_exit_group /* 280 */
/* Rest of new 2.6 syscalls */
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl
.long sys_epoll_wait
.long sys_remap_file_pages /* 285 */
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime
.long sys_timer_gettime
.long sys_timer_getoverrun /* 290 */
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime
.long sys_clock_getres
.long sys_clock_nanosleep /* 295 */
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill
.long sys_utimes
.long sys_fadvise64_64 /* 300 */
.long sys_ni_syscall /* Reserved for vserver */
.long sys_ni_syscall /* Reserved for mbind */
.long sys_ni_syscall /* get_mempolicy */
.long sys_ni_syscall /* set_mempolicy */
.long sys_mq_open /* 305 */
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive
.long sys_mq_notify
.long sys_mq_getsetattr /* 310 */
.long sys_ni_syscall /* Reserved for kexec */
.long sys_waitid
.long sys_add_key
.long sys_request_key
.long sys_keyctl /* 315 */

610
arch/sh64/kernel/time.c Normal file

@@ -0,0 +1,610 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/time.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*
* Original TMU/RTC code taken from sh version.
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Some code taken from i386 version.
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/registers.h> /* required by inline __asm__ stmt. */
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <asm/hardware.h>
#define TMU_TOCR_INIT 0x00
#define TMU0_TCR_INIT 0x0020
#define TMU_TSTR_INIT 1
#define TMU_TSTR_OFF 0
/* RCR1 Bits */
#define RCR1_CF 0x80 /* Carry Flag */
#define RCR1_CIE 0x10 /* Carry Interrupt Enable */
#define RCR1_AIE 0x08 /* Alarm Interrupt Enable */
#define RCR1_AF 0x01 /* Alarm Flag */
/* RCR2 Bits */
#define RCR2_PEF 0x80 /* PEriodic interrupt Flag */
#define RCR2_PESMASK 0x70 /* Periodic interrupt Set */
#define RCR2_RTCEN 0x08 /* ENable RTC */
#define RCR2_ADJ 0x04 /* ADJustment (30-second) */
#define RCR2_RESET 0x02 /* Reset bit */
#define RCR2_START 0x01 /* Start bit */
/* Clock, Power and Reset Controller */
#define CPRC_BLOCK_OFF 0x01010000
#define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
#define FRQCR (cprc_base+0x0)
#define WTCSR (cprc_base+0x0018)
#define STBCR (cprc_base+0x0030)
/* Time Management Unit */
#define TMU_BLOCK_OFF 0x01020000
#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
#define TMU0_BASE tmu_base + 0x8 + (0xc * 0x0)
#define TMU1_BASE tmu_base + 0x8 + (0xc * 0x1)
#define TMU2_BASE tmu_base + 0x8 + (0xc * 0x2)
#define TMU_TOCR tmu_base+0x0 /* Byte access */
#define TMU_TSTR tmu_base+0x4 /* Byte access */
#define TMU0_TCOR TMU0_BASE+0x0 /* Long access */
#define TMU0_TCNT TMU0_BASE+0x4 /* Long access */
#define TMU0_TCR TMU0_BASE+0x8 /* Word access */
/* Real Time Clock */
#define RTC_BLOCK_OFF 0x01040000
#define RTC_BASE PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
#define R64CNT rtc_base+0x00
#define RSECCNT rtc_base+0x04
#define RMINCNT rtc_base+0x08
#define RHRCNT rtc_base+0x0c
#define RWKCNT rtc_base+0x10
#define RDAYCNT rtc_base+0x14
#define RMONCNT rtc_base+0x18
#define RYRCNT rtc_base+0x1c /* 16bit */
#define RSECAR rtc_base+0x20
#define RMINAR rtc_base+0x24
#define RHRAR rtc_base+0x28
#define RWKAR rtc_base+0x2c
#define RDAYAR rtc_base+0x30
#define RMONAR rtc_base+0x34
#define RCR1 rtc_base+0x38
#define RCR2 rtc_base+0x3c
#ifndef BCD_TO_BIN
#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
#endif
#ifndef BIN_TO_BCD
#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
#endif
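These helpers convert between the RTC's packed-BCD registers and plain binary, e.g. BCD 0x59 <-> decimal 59. A quick user-space check of the round trip:

/* User-space check of the BCD helpers above. */
#include <stdio.h>

#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)

int main(void)
{
	unsigned int v = 0x59; /* BCD for decimal 59, as read from RSECCNT */

	BCD_TO_BIN(v);
	printf("BCD 0x59 -> %u\n", v); /* 59 */
	BIN_TO_BCD(v);
	printf("59 -> %#x\n", v); /* back to 0x59 */
	return 0;
}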
#define TICK_SIZE (tick_nsec / 1000)
extern unsigned long wall_jiffies;
u64 jiffies_64 = INITIAL_JIFFIES;
static unsigned long tmu_base, rtc_base;
unsigned long cprc_base;
/* Variables to allow interpolation of time of day to resolution better than a
* jiffy. */
/* This is effectively protected by xtime_lock */
static unsigned long ctc_last_interrupt;
static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
#define CTC_JIFFY_SCALE_SHIFT 40
/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
/* Estimate number of microseconds that have elapsed since the last timer tick,
by scaling the delta that has occurred in the CTC register.
WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this
in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm
probably needs to use TMU.TCNT0 instead. This will work even if the CPU is
sleeping, though will be coarser.
FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
is running or if the freq or tick arguments of adjtimex are modified after
we have calibrated the scaling factor? This will result in either a jump at
the end of a tick period, or a wrap backwards at the start of the next one,
if the application is reading the time of day often enough. I think we
ought to do better than this. For this reason, usecs_per_jiffy is left
separated out in the calculation below. This allows some future hook into
the adjtime-related stuff in kernel/timer.c to remove this hazard.
*/
static unsigned long usecs_since_tick(void)
{
unsigned long long current_ctc;
long ctc_ticks_since_interrupt;
unsigned long long ull_ctc_ticks_since_interrupt;
unsigned long result;
unsigned long long mul1_out;
unsigned long long mul1_out_high;
unsigned long long mul2_out_low, mul2_out_high;
/* Read CTC register */
asm ("getcon cr62, %0" : "=r" (current_ctc));
/* Note, the CTC counts down on each CPU clock, not up.
Note(2), use long type to get correct wraparound arithmetic when
the counter crosses zero. */
ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;
/* Inline assembly to do 32x32x32->64 multiplier */
asm volatile ("mulu.l %1, %2, %0" :
"=r" (mul1_out) :
"r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));
mul1_out_high = mul1_out >> 32;
asm volatile ("mulu.l %1, %2, %0" :
"=r" (mul2_out_low) :
"r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));
#if 1
asm volatile ("mulu.l %1, %2, %0" :
"=r" (mul2_out_high) :
"r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
#endif
result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
return result;
}
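The function above avoids a 64-bit divide in the interrupt path by multiplying with the pre-computed reciprocal 2^40 / ctc_ticks_per_jiffy and shifting the wide product back down. A user-space sketch of the same trick; the clock figures are assumptions for illustration, and GCC's __int128 stands in for the explicit 32x32->64 multiplies used above:

/* Sketch of the scaled-reciprocal trick: no divide at "interrupt" time. */
#include <stdio.h>

#define CTC_JIFFY_SCALE_SHIFT 40

int main(void)
{
	/* Assumed figures: a 400 MHz CPU clock with HZ=100 */
	unsigned long long ticks_per_jiffy = 4000000;
	unsigned long long usecs_per_jiffy = 10000; /* 1000000 / HZ */
	unsigned long long recip =
		(1ull << CTC_JIFFY_SCALE_SHIFT) / ticks_per_jiffy;

	unsigned long long ticks = 1234567; /* CTC delta since last tick */
	unsigned long usecs = (unsigned long)
		(((unsigned __int128)ticks * usecs_per_jiffy * recip)
		 >> CTC_JIFFY_SCALE_SHIFT);

	printf("%lu usecs (exact: %llu)\n", usecs,
	       ticks * usecs_per_jiffy / ticks_per_jiffy);
	return 0;
}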
void do_gettimeofday(struct timeval *tv)
{
unsigned long flags;
unsigned long seq;
unsigned long usec, sec;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
usec = usecs_since_tick();
{
unsigned long lost = jiffies - wall_jiffies;
if (lost)
usec += lost * (1000000 / HZ);
}
sec = xtime.tv_sec;
usec += xtime.tv_nsec / 1000;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= 1000000) {
usec -= 1000000;
sec++;
}
tv->tv_sec = sec;
tv->tv_usec = usec;
}
int do_settimeofday(struct timespec *tv)
{
time_t wtm_sec, sec = tv->tv_sec;
long wtm_nsec, nsec = tv->tv_nsec;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
/*
* This is revolting. We need to set "xtime" correctly. However, the
* value in this location is the value at the most recent update of
* wall time. Discover what correction gettimeofday() would have
* made, and then undo it!
*/
nsec -= 1000 * (usecs_since_tick() +
(jiffies - wall_jiffies) * (1000000 / HZ));
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT;
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
static int set_rtc_time(unsigned long nowtime)
{
int retval = 0;
int real_seconds, real_minutes, cmos_minutes;
ctrl_outb(RCR2_RESET, RCR2); /* Reset pre-scaler & stop RTC */
cmos_minutes = ctrl_inb(RMINCNT);
BCD_TO_BIN(cmos_minutes);
/*
* since we're only adjusting minutes and seconds,
* don't interfere with hour overflow. This avoids
* messing with unknown time zones but requires your
* RTC not to be off by more than 15 minutes
*/
real_seconds = nowtime % 60;
real_minutes = nowtime / 60;
if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
real_minutes += 30; /* correct for half hour time zone */
real_minutes %= 60;
if (abs(real_minutes - cmos_minutes) < 30) {
BIN_TO_BCD(real_seconds);
BIN_TO_BCD(real_minutes);
ctrl_outb(real_seconds, RSECCNT);
ctrl_outb(real_minutes, RMINCNT);
} else {
printk(KERN_WARNING
"set_rtc_time: can't update from %d to %d\n",
cmos_minutes, real_minutes);
retval = -1;
}
ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start RTC */
return retval;
}
/* last time the RTC clock got updated */
static long last_rtc_update = 0;
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*/
static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long long current_ctc;
asm ("getcon cr62, %0" : "=r" (current_ctc));
ctc_last_interrupt = (unsigned long) current_ctc;
do_timer(regs);
#ifndef CONFIG_SMP
update_process_times(user_mode(regs));
#endif
profile_tick(CPU_PROFILING, regs);
#ifdef CONFIG_HEARTBEAT
{
extern void heartbeat(void);
heartbeat();
}
#endif
/*
* If we have an externally synchronized Linux clock, then update
* RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
*/
if ((time_status & STA_UNSYNC) == 0 &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
if (set_rtc_time(xtime.tv_sec) == 0)
last_rtc_update = xtime.tv_sec;
else
last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
}
}
/*
* This is the same as the above, except we _also_ save the current
* Time Stamp Counter value at the time of the timer interrupt, so that
* we later on can estimate the time of day more exactly.
*/
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long timer_status;
/* Clear UNF bit */
timer_status = ctrl_inw(TMU0_TCR);
timer_status &= ~0x100;
ctrl_outw(timer_status, TMU0_TCR);
/*
* Here we are in the timer irq handler. We just have irqs locally
* disabled but we don't know if the timer_bh is running on the other
* CPU. We need to avoid an SMP race with it. NOTE: we don't need
* the irq version of write_lock because as just said we have irq
* locally disabled. -arca
*/
write_lock(&xtime_lock);
do_timer_interrupt(irq, NULL, regs);
write_unlock(&xtime_lock);
return IRQ_HANDLED;
}
static unsigned long get_rtc_time(void)
{
unsigned int sec, min, hr, wk, day, mon, yr, yr100;
again:
do {
ctrl_outb(0, RCR1); /* Clear CF-bit */
sec = ctrl_inb(RSECCNT);
min = ctrl_inb(RMINCNT);
hr = ctrl_inb(RHRCNT);
wk = ctrl_inb(RWKCNT);
day = ctrl_inb(RDAYCNT);
mon = ctrl_inb(RMONCNT);
yr = ctrl_inw(RYRCNT);
yr100 = (yr >> 8);
yr &= 0xff;
} while ((ctrl_inb(RCR1) & RCR1_CF) != 0);
BCD_TO_BIN(yr100);
BCD_TO_BIN(yr);
BCD_TO_BIN(mon);
BCD_TO_BIN(day);
BCD_TO_BIN(hr);
BCD_TO_BIN(min);
BCD_TO_BIN(sec);
if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
hr > 23 || min > 59 || sec > 59) {
printk(KERN_ERR
"SH RTC: invalid value, resetting to 1 Jan 2000\n");
ctrl_outb(RCR2_RESET, RCR2); /* Reset & Stop */
ctrl_outb(0, RSECCNT);
ctrl_outb(0, RMINCNT);
ctrl_outb(0, RHRCNT);
ctrl_outb(6, RWKCNT);
ctrl_outb(1, RDAYCNT);
ctrl_outb(1, RMONCNT);
ctrl_outw(0x2000, RYRCNT);
ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start */
goto again;
}
return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
}
static __init unsigned int get_cpu_hz(void)
{
unsigned int count;
unsigned long __dummy;
unsigned long ctc_val_init, ctc_val;
/*
** Regardless of the toolchain, force the compiler to use the
** arbitrary register r3 as a clock tick counter.
** NOTE: r3 must be in accordance with rtc_interrupt()
*/
register unsigned long long __rtc_irq_flag __asm__ ("r3");
local_irq_enable();
do {} while (ctrl_inb(R64CNT) != 0);
ctrl_outb(RCR1_CIE, RCR1); /* Enable carry interrupt */
/*
* r3 is arbitrary. CDC does not support "=z".
*/
ctc_val_init = 0xffffffff;
ctc_val = ctc_val_init;
asm volatile("gettr tr0, %1\n\t"
"putcon %0, " __CTC "\n\t"
"and %2, r63, %2\n\t"
"pta $+4, tr0\n\t"
"beq/l %2, r63, tr0\n\t"
"ptabs %1, tr0\n\t"
"getcon " __CTC ", %0\n\t"
: "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
: "0" (0));
local_irq_disable();
/*
* SH-3:
* CPU clock = 4 stages * loop
* tst rm,rm if id ex
* bt/s 1b if id ex
* add #1,rd if id ex
* (if) pipe line stole
* tst rm,rm if id ex
* ....
*
*
* SH-4:
* CPU clock = 6 stages * loop
* I don't know why.
* ....
*
* SH-5:
* Use CTC register to count. This approach returns the right value
* even if the I-cache is disabled (e.g. whilst debugging.)
*
*/
count = ctc_val_init - ctc_val; /* CTC counts down */
#if defined (CONFIG_SH_SIMULATOR)
/*
* Let's pretend we are a 5MHz SH-5 to avoid a too
* little timer interval. Also to keep delay
* calibration within a reasonable time.
*/
return 5000000;
#else
/*
* The loop above ran for half an R64CNT period: the carry (CUI)
* interrupt is raised at 64 counts out of a complete 128-count
* wrap-around, so double the count to get clock cycles per second.
*/
return count*2;
#endif
}
static irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
ctrl_outb(0, RCR1); /* Disable Carry Interrupts */
regs->regs[3] = 1; /* Using r3 */
return IRQ_HANDLED;
}
static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL};
static struct irqaction irq1 = { rtc_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "rtc", NULL, NULL};
void __init time_init(void)
{
unsigned int cpu_clock, master_clock, bus_clock, module_clock;
unsigned long interval;
unsigned long frqcr, ifc, pfc;
static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
#define bfc_table ifc_table /* Same */
#define pfc_table ifc_table /* Same */
tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
if (!tmu_base) {
panic("Unable to remap TMU\n");
}
rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
if (!rtc_base) {
panic("Unable to remap RTC\n");
}
cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
if (!cprc_base) {
panic("Unable to remap CPRC\n");
}
xtime.tv_sec = get_rtc_time();
xtime.tv_nsec = 0;
setup_irq(TIMER_IRQ, &irq0);
setup_irq(RTC_IRQ, &irq1);
/* Check how fast it is.. */
cpu_clock = get_cpu_hz();
/* Note careful order of operations to maintain reasonable precision and avoid overflow. */
scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
disable_irq(RTC_IRQ);
printk("CPU clock: %d.%02dMHz\n",
(cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
{
unsigned short bfc;
frqcr = ctrl_inl(FRQCR);
ifc = ifc_table[(frqcr>> 6) & 0x0007];
bfc = bfc_table[(frqcr>> 3) & 0x0007];
pfc = pfc_table[(frqcr>> 12) & 0x0007];
master_clock = cpu_clock * ifc;
bus_clock = master_clock/bfc;
}
printk("Bus clock: %d.%02dMHz\n",
(bus_clock/1000000), (bus_clock % 1000000)/10000);
module_clock = master_clock/pfc;
printk("Module clock: %d.%02dMHz\n",
(module_clock/1000000), (module_clock % 1000000)/10000);
interval = (module_clock/(HZ*4));
printk("Interval = %ld\n", interval);
current_cpu_data.cpu_clock = cpu_clock;
current_cpu_data.master_clock = master_clock;
current_cpu_data.bus_clock = bus_clock;
current_cpu_data.module_clock = module_clock;
/* Start TMU0 */
ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
ctrl_outl(interval, TMU0_TCOR);
ctrl_outl(interval, TMU0_TCNT);
ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
}
void enter_deep_standby(void)
{
/* Disable watchdog timer */
ctrl_outl(0xa5000000, WTCSR);
/* Configure deep standby on sleep */
ctrl_outl(0x03, STBCR);
#ifdef CONFIG_SH_ALPHANUMERIC
{
extern void mach_alphanum(int position, unsigned char value);
extern void mach_alphanum_brightness(int setting);
char halted[] = "Halted. ";
int i;
mach_alphanum_brightness(6); /* dimmest setting above off */
for (i=0; i<8; i++) {
mach_alphanum(i, halted[i]);
}
asm __volatile__ ("synco");
}
#endif
asm __volatile__ ("sleep");
asm __volatile__ ("synci");
asm __volatile__ ("nop");
asm __volatile__ ("nop");
asm __volatile__ ("nop");
asm __volatile__ ("nop");
panic("Unexpected wakeup!\n");
}
/*
* Scheduler clock - returns current time in nanosec units.
*/
unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}

961
arch/sh64/kernel/traps.c Normal file

@@ -0,0 +1,961 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/traps.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'entry.S'.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d)
#endif
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
spinlock_t die_lock;
void die(const char * str, struct pt_regs * regs, long err)
{
console_verbose();
spin_lock_irq(&die_lock);
printk("%s: %lx\n", str, (err & 0xffffff));
show_regs(regs);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs))
die(str, regs, err);
}
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs)) {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
die(str, regs, err);
}
}
DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
/* Implement misaligned load/store handling for the kernel (and optionally for
   user mode too). Limitation: only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */
static int misaligned_fixup(struct pt_regs *regs);
asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0) {
do_unhandled_exception(7, SIGSEGV, "address error(load)",
"do_address_error_load",
error_code, regs, current);
}
return;
}
asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0) {
do_unhandled_exception(8, SIGSEGV, "address error(store)",
"do_address_error_store",
error_code, regs, current);
}
return;
}
#if defined(CONFIG_SH64_ID2815_WORKAROUND)
#define OPCODE_INVALID 0
#define OPCODE_USER_VALID 1
#define OPCODE_PRIV_VALID 2
/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG 3
/* Table of valid opcodes for SHmedia mode.
Form a 10-bit value by concatenating the major/minor opcodes i.e.
opcode[31:26,20:16]. The 6 MSBs of this value index into the following
array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
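/* Worked example of the decode below: GETCON has major opcode 0x09 and minor
opcode 0xf, so index = 0x09 and shift = 0xf << 1 = 30;
(shmedia_opcode_table[9] >> 30) & 0x3 == (0xc0000000 >> 30) & 0x3 == 3,
i.e. OPCODE_CTRL_REG - the case that needs the extra control register check. */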
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
/* Workaround SH5-101 cut2 silicon defect #2815 :
in some situations, inter-mode branches from SHcompact -> SHmedia
which should take ITLBMISS or EXECPROT exceptions at the target
falsely take RESINST at the target instead. */
unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
unsigned long pc, aligned_pc;
int get_user_error;
int trapnr = 12;
int signr = SIGILL;
char *exception_name = "reserved_instruction";
pc = regs->pc;
if ((pc & 3) == 1) {
/* SHmedia : check for defect. This requires executable vmas
to be readable too. */
aligned_pc = pc & ~3;
if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
get_user_error = -EFAULT;
} else {
get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
}
if (get_user_error >= 0) {
unsigned long index, shift;
unsigned long major, minor, combined;
unsigned long reserved_field;
reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
combined = (major << 4) | minor;
index = major;
shift = minor << 1;
if (reserved_field == 0) {
int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
switch (opcode_state) {
case OPCODE_INVALID:
/* Trap. */
break;
case OPCODE_USER_VALID:
/* Restart the instruction : the branch to the instruction will now be from an RTE
not from SHcompact so the silicon defect won't be triggered. */
return;
case OPCODE_PRIV_VALID:
if (!user_mode(regs)) {
/* Should only ever get here if a module has
SHcompact code inside it. If so, the same fix up is needed. */
return; /* same reason */
}
/* Otherwise, user mode trying to execute a privileged instruction -
fall through to trap. */
break;
case OPCODE_CTRL_REG:
/* If in privileged mode, return as above. */
if (!user_mode(regs)) return;
/* In user mode ... */
if (combined == 0x9f) { /* GETCON */
unsigned long regno = (opcode >> 20) & 0x3f;
if (regno >= 62) {
return;
}
/* Otherwise, reserved or privileged control register, => trap */
} else if (combined == 0x1bf) { /* PUTCON */
unsigned long regno = (opcode >> 4) & 0x3f;
if (regno >= 62) {
return;
}
/* Otherwise, reserved or privileged control register, => trap */
} else {
/* Trap */
}
break;
default:
/* Fall through to trap. */
break;
}
}
/* fall through to normal resinst processing */
} else {
/* Error trying to read opcode. This typically means a
real fault, not a RESINST any more. So change the
codes. */
trapnr = 87;
exception_name = "address error (exec)";
signr = SIGSEGV;
}
}
do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}
#else /* CONFIG_SH64_ID2815_WORKAROUND */
/* If the workaround isn't needed, this is just a straightforward reserved
instruction */
DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
#endif /* CONFIG_SH64_ID2815_WORKAROUND */
#include <asm/system.h>
/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
show_excp_regs(__FUNCTION__, -1, -1, regs);
die_if_kernel("exception", regs, ex);
}
int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
/* Syscall debug */
printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
die_if_kernel("unknown trapa", regs, scId);
return -ENOSYS;
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
extern void sh64_unwind(struct pt_regs *regs);
struct pt_regs *regs;
regs = tsk ? tsk->thread.kregs : NULL;
sh64_unwind(regs);
#else
printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}
void show_task(unsigned long *sp)
{
show_stack(NULL, sp);
}
void dump_stack(void)
{
show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
show_excp_regs(fn_name, trapnr, signr, regs);
tsk->thread.error_code = error_code;
tsk->thread.trap_no = trapnr;
if (user_mode(regs))
force_sig(signr, tsk);
die_if_no_fixup(str, regs, error_code);
}
static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
int get_user_error;
unsigned long aligned_pc;
unsigned long opcode;
if ((pc & 3) == 1) {
/* SHmedia */
aligned_pc = pc & ~3;
if (from_user_mode) {
if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
get_user_error = -EFAULT;
} else {
get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
*result_opcode = opcode;
}
return get_user_error;
} else {
/* If the fault was in the kernel, we can read the opcode
 * directly; if the address turns out to be bad, we simply
 * fault on the dereference.
 */
*result_opcode = *(unsigned long *) aligned_pc;
return 0;
}
} else if ((pc & 1) == 0) {
/* SHcompact */
/* TODO : provide handling for this. We don't really support
user-mode SHcompact yet, and for a kernel fault, this would
have to come from a module built for SHcompact. */
return -EFAULT;
} else {
/* misaligned */
return -EFAULT;
}
}
static int address_is_sign_extended(__u64 a)
{
__u64 b;
#if (NEFF == 32)
b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
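/* Example for NEFF == 32: a == 0xffffffff80000000 round-trips through the
cast chain (b == a), so it is a validly sign-extended 32-bit effective
address; a == 0x0000000080000000 comes back as 0xffffffff80000000 and is
rejected. */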
static int generate_and_check_address(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
__u64 *address)
{
/* return -1 for fault, 0 for OK */
__u64 base_address, addr;
int basereg;
basereg = (opcode >> 20) & 0x3f;
base_address = regs->regs[basereg];
if (displacement_not_indexed) {
__s64 displacement;
displacement = (opcode >> 10) & 0x3ff;
displacement = ((displacement << 54) >> 54); /* sign extend */
addr = (__u64)((__s64)base_address + (displacement << width_shift));
} else {
__u64 offset;
int offsetreg;
offsetreg = (opcode >> 10) & 0x3f;
offset = regs->regs[offsetreg];
addr = base_address + offset;
}
/* Check sign extended */
if (!address_is_sign_extended(addr)) {
return -1;
}
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Check accessible. For misaligned access in the kernel, assume the
address is always accessible (and if not, just fault when the
load/store gets done.) */
if (user_mode(regs)) {
if (addr >= TASK_SIZE) {
return -1;
}
/* Do access_ok check later - it depends on whether it's a load or a store. */
}
#endif
*address = addr;
return 0;
}
/* Default value as for sh */
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
#endif
static int kernel_mode_unaligned_fixup_count = 32;
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
q[0] = p[0];
q[1] = p[1];
if (do_sign_extend) {
*result = (__u64)(__s64) *(short *) &x;
} else {
*result = (__u64) x;
}
}
static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
x = (__u16) value;
p[0] = q[0];
p[1] = q[1];
}
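/* Note: the two helpers above move the 16-bit datum one byte at a time
through an aligned bounce variable, so the fixup itself never performs a
misaligned access (which would just re-trap). */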
static int misaligned_load(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
int do_sign_extend)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
destreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
switch (width_shift) {
case 1:
if (do_sign_extend) {
regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
} else {
regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
}
break;
case 2:
regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
break;
case 3:
regs->regs[destreg] = buffer;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
} else
#endif
{
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 lo, hi;
switch (width_shift) {
case 1:
misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
break;
case 2:
asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
case 3:
asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
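/* The ldlo/ldhi pairs above (and stlo/sthi in misaligned_store below) are the
SHmedia idiom for unaligned access, analogous to the MIPS lwl/lwr pairs: each
instruction touches only the bytes within one aligned word/quadword, and the
two partial values are OR-ed together to reassemble the full datum, so neither
access is itself misaligned. (Illustrative summary; see the SH-5 ISA manual
for the precise semantics.) */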
static int misaligned_store(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
srcreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
switch (width_shift) {
case 1:
*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
break;
case 2:
*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
break;
case 3:
buffer = regs->regs[srcreg];
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
} else
#endif
{
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 val = regs->regs[srcreg];
switch (width_shift) {
case 1:
misaligned_kernel_word_store(address, val);
break;
case 2:
asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
break;
case 3:
asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
error. */
static int misaligned_fpu_load(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
__u32 buflo, bufhi;
if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
grab_fpu();
fpsave(&current->thread.fpu.hard);
release_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
buflo = *(__u32*) &buffer;
bufhi = *(1 + (__u32*) &buffer);
switch (width_shift) {
case 2:
current->thread.fpu.hard.fp_regs[destreg] = buflo;
break;
case 3:
if (do_paired_load) {
current->thread.fpu.hard.fp_regs[destreg] = buflo;
current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
} else {
#if defined(CONFIG_LITTLE_ENDIAN)
current->thread.fpu.hard.fp_regs[destreg] = bufhi;
current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
current->thread.fpu.hard.fp_regs[destreg] = buflo;
current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
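/* Note on the CONFIG_LITTLE_ENDIAN swap above: a non-paired double (FLD.D)
occupies an even/odd pair of 32-bit FP registers, and on little-endian the
register-pair order is the reverse of the memory word order, hence bufhi and
buflo are exchanged; FLD.P moves two independent singles, so no swap is
needed. (Inferred from the code, not from the ISA manual.) */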
static int misaligned_fpu_store(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
/* Initialise these to NaNs. */
__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
grab_fpu();
fpsave(&current->thread.fpu.hard);
release_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
switch (width_shift) {
case 2:
buflo = current->thread.fpu.hard.fp_regs[srcreg];
break;
case 3:
if (do_paired_load) {
buflo = current->thread.fpu.hard.fp_regs[srcreg];
bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
} else {
#if defined(CONFIG_LITTLE_ENDIAN)
bufhi = current->thread.fpu.hard.fp_regs[srcreg];
buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
buflo = current->thread.fpu.hard.fp_regs[srcreg];
bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
*(__u32*) &buffer = buflo;
*(1 + (__u32*) &buffer) = bufhi;
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
#endif
static int misaligned_fixup(struct pt_regs *regs)
{
unsigned long opcode;
int error;
int major, minor;
#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Never fixup user mode misaligned accesses without this option enabled. */
return -1;
#else
if (!user_mode_unaligned_fixup_enable) return -1;
#endif
error = read_opcode(regs->pc, &opcode, user_mode(regs));
if (error < 0) {
return error;
}
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
--user_mode_unaligned_fixup_count;
/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
current->comm, current->pid, (__u32)regs->pc, opcode);
} else
#endif
if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
--kernel_mode_unaligned_fixup_count;
if (in_interrupt()) {
printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
(__u32)regs->pc, opcode);
} else {
printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
current->comm, current->pid, (__u32)regs->pc, opcode);
}
}
switch (major) {
case (0x84>>2): /* LD.W */
error = misaligned_load(regs, opcode, 1, 1, 1);
break;
case (0xb0>>2): /* LD.UW */
error = misaligned_load(regs, opcode, 1, 1, 0);
break;
case (0x88>>2): /* LD.L */
error = misaligned_load(regs, opcode, 1, 2, 1);
break;
case (0x8c>>2): /* LD.Q */
error = misaligned_load(regs, opcode, 1, 3, 0);
break;
case (0xa4>>2): /* ST.W */
error = misaligned_store(regs, opcode, 1, 1);
break;
case (0xa8>>2): /* ST.L */
error = misaligned_store(regs, opcode, 1, 2);
break;
case (0xac>>2): /* ST.Q */
error = misaligned_store(regs, opcode, 1, 3);
break;
case (0x40>>2): /* indexed loads */
switch (minor) {
case 0x1: /* LDX.W */
error = misaligned_load(regs, opcode, 0, 1, 1);
break;
case 0x5: /* LDX.UW */
error = misaligned_load(regs, opcode, 0, 1, 0);
break;
case 0x2: /* LDX.L */
error = misaligned_load(regs, opcode, 0, 2, 1);
break;
case 0x3: /* LDX.Q */
error = misaligned_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0x60>>2): /* indexed stores */
switch (minor) {
case 0x1: /* STX.W */
error = misaligned_store(regs, opcode, 0, 1);
break;
case 0x2: /* STX.L */
error = misaligned_store(regs, opcode, 0, 2);
break;
case 0x3: /* STX.Q */
error = misaligned_store(regs, opcode, 0, 3);
break;
default:
error = -1;
break;
}
break;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
case (0x94>>2): /* FLD.S */
error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
break;
case (0x98>>2): /* FLD.P */
error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
break;
case (0x9c>>2): /* FLD.D */
error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
break;
case (0x1c>>2): /* floating indexed loads */
switch (minor) {
case 0x8: /* FLDX.S */
error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FLDX.P */
error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FLDX.D */
error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0xb4>>2): /* FST.S */
error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
break;
case (0xb8>>2): /* FST.P */
error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
break;
case (0xbc>>2): /* FST.D */
error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
break;
case (0x3c>>2): /* floating indexed stores */
switch (minor) {
case 0x8: /* FSTX.S */
error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FSTX.P */
error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FSTX.D */
error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
#endif
default:
/* Fault */
error = -1;
break;
}
if (error < 0) {
return error;
} else {
regs->pc += 4; /* Skip the instruction that's just been emulated */
return 0;
}
}
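/* Dispatch example: for an LD.L the major opcode field is 0x88 >> 2 == 0x22,
so the switch above calls misaligned_load(regs, opcode, 1, 2, 1), i.e.
displacement addressing, width_shift 2 (a 4-byte access), sign-extended. */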
static ctl_table unaligned_table[] = {
{1, "kernel_reports", &kernel_mode_unaligned_fixup_count,
sizeof(int), 0644, NULL, &proc_dointvec},
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
{2, "user_reports", &user_mode_unaligned_fixup_count,
sizeof(int), 0644, NULL, &proc_dointvec},
{3, "user_enable", &user_mode_unaligned_fixup_enable,
sizeof(int), 0644, NULL, &proc_dointvec},
#endif
{0}
};
static ctl_table unaligned_root[] = {
{1, "unaligned_fixup", NULL, 0, 0555, unaligned_table},
{0}
};
static ctl_table sh64_root[] = {
{1, "sh64", NULL, 0, 0555, unaligned_root},
{0}
};
static struct ctl_table_header *sysctl_header;
static int __init init_sysctl(void)
{
sysctl_header = register_sysctl_table(sh64_root, 0);
return 0;
}
__initcall(init_sysctl);
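/* The tables above should show up as /proc/sys/sh64/unaligned_fixup/
{kernel_reports,user_reports,user_enable}, the latter two only with
CONFIG_SH64_USER_MISALIGNED_FIXUP; e.g. writing 0 to user_enable turns user
mode fixups off. (Paths inferred from the ctl_table names.) */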
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
u64 peek_real_address_q(u64 addr);
u64 poke_real_address_q(u64 addr, u64 val);
unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
unsigned long long exp_cause;
/* It's not worth ioremapping the debug module registers for the amount
of access we make to them - just go direct to their physical
addresses. */
exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
if (exp_cause & ~4) {
printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
(unsigned long)(exp_cause & 0xffffffff));
}
show_state();
/* Clear all DEBUGINT causes */
poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}

326
arch/sh64/kernel/unwind.c Normal file

@@ -0,0 +1,326 @@
/*
* arch/sh64/kernel/unwind.c
*
* Copyright (C) 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/io.h>
static u8 regcache[63];
/*
* Finding the previous stack frame isn't as straightforward as it is
* on some other platforms. In the sh64 case, we don't have "linked" stack
* frames, so we need to do a bit of work to determine the previous frame,
* and in turn, the previous r14/r18 pair.
*
* There are generally a few cases which determine where we can find out
* the r14/r18 values. In the general case, this can be determined by poking
* around the prologue of the symbol PC is in (note that we absolutely must
* have frame pointer support as well as the kernel symbol table mapped,
* otherwise we can't even get this far).
*
* In other cases, such as the interrupt/exception path, we can poke around
* the sp/fp.
*
* Notably, this entire approach is somewhat error prone, and in the event
* that the previous frame cannot be determined, that's all we can do.
* Either way, this still leaves us with a more correct backtrace than what
* we would be able to come up with by walking the stack (which is garbage
* for anything beyond the first frame).
* -- PFM.
*/
static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
unsigned long *pprev_fp, unsigned long *pprev_pc,
struct pt_regs *regs)
{
const char *sym;
char *modname, namebuf[128];
unsigned long offset, size;
unsigned long prologue = 0;
unsigned long fp_displacement = 0;
unsigned long fp_prev = 0;
unsigned long offset_r14 = 0, offset_r18 = 0;
int i, found_prologue_end = 0;
sym = kallsyms_lookup(pc, &size, &offset, &modname, namebuf);
if (!sym)
return -EINVAL;
prologue = pc - offset;
if (!prologue)
return -EINVAL;
/* Validate fp, to avoid risk of dereferencing a bad pointer later.
Assume 128MB since that's the amount of RAM on a Cayman. Modify
when there is an SH-5 board with more. */
if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
(fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
((fp & 7) != 0)) {
return -EINVAL;
}
/*
* Depth to walk, depth is completely arbitrary.
*/
for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
unsigned long op;
u8 major, minor;
u8 src, dest, disp;
op = *(unsigned long *)prologue;
major = (op >> 26) & 0x3f;
src = (op >> 20) & 0x3f;
minor = (op >> 16) & 0xf;
disp = (op >> 10) & 0x3f;
dest = (op >> 4) & 0x3f;
/*
* Stack frame creation happens in a number of ways.. in the
* general case when the stack frame is less than 511 bytes,
* it's generally created by an addi or addi.l:
*
* addi/addi.l r15, -FRAME_SIZE, r15
*
* in the event that the frame size is bigger than this, it's
* typically created using a movi/sub pair as follows:
*
* movi FRAME_SIZE, rX
* sub r15, rX, r15
*/
switch (major) {
case (0x00 >> 2):
switch (minor) {
case 0x8: /* add.l */
case 0x9: /* add */
/* Look for r15, r63, r14 */
if (src == 15 && disp == 63 && dest == 14)
found_prologue_end = 1;
break;
case 0xa: /* sub.l */
case 0xb: /* sub */
if (src != 15 || dest != 15)
continue;
fp_displacement -= regcache[disp];
fp_prev = fp - fp_displacement;
break;
}
break;
case (0xa8 >> 2): /* st.l */
if (src != 15)
continue;
switch (dest) {
case 14:
if (offset_r14 || fp_displacement == 0)
continue;
offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
offset_r14 *= sizeof(unsigned long);
offset_r14 += fp_displacement;
break;
case 18:
if (offset_r18 || fp_displacement == 0)
continue;
offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
offset_r18 *= sizeof(unsigned long);
offset_r18 += fp_displacement;
break;
}
break;
case (0xcc >> 2): /* movi */
if (dest >= 63) {
printk(KERN_NOTICE "%s: Invalid dest reg %d "
"specified in movi handler. Failed "
"opcode was 0x%lx: ", __FUNCTION__,
dest, op);
continue;
}
/* Sign extend */
regcache[dest] =
((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54;
break;
case (0xd0 >> 2): /* addi */
case (0xd4 >> 2): /* addi.l */
/* Look for r15, -FRAME_SIZE, r15 */
if (src != 15 || dest != 15)
continue;
/* Sign extended frame size.. */
fp_displacement +=
(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
fp_prev = fp - fp_displacement;
break;
}
if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
break;
}
if (offset_r14 == 0 || fp_prev == 0) {
if (!offset_r14)
pr_debug("Unable to find r14 offset\n");
if (!fp_prev)
pr_debug("Unable to find previous fp\n");
return -EINVAL;
}
/* For the innermost leaf function, there might not be an offset_r18 */
if (!*pprev_pc && (offset_r18 == 0))
return -EINVAL;
*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
if (offset_r18)
*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
*pprev_pc &= ~1;
return 0;
}
/* Don't put this on the stack since we'll want to call sh64_unwind
* when we're close to underflowing the stack anyway. */
static struct pt_regs here_regs;
extern const char syscall_ret;
extern const char ret_from_syscall;
extern const char ret_from_exception;
extern const char ret_from_irq;
static void sh64_unwind_inner(struct pt_regs *regs);
static void unwind_nested (unsigned long pc, unsigned long fp)
{
if ((fp >= __MEMORY_START) &&
((fp & 7) == 0)) {
sh64_unwind_inner((struct pt_regs *) fp);
}
}
static void sh64_unwind_inner(struct pt_regs *regs)
{
unsigned long pc, fp;
int ofs = 0;
int first_pass;
pc = regs->pc & ~1;
fp = regs->regs[14];
first_pass = 1;
for (;;) {
int cond;
unsigned long next_fp, next_pc;
if (pc == ((unsigned long) &syscall_ret & ~1)) {
printk("SYSCALL\n");
unwind_nested(pc,fp);
return;
}
if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
printk("SYSCALL (PREEMPTED)\n");
unwind_nested(pc,fp);
return;
}
/* In this case, the PC is discovered by lookup_prev_stack_frame but
it has 4 taken off it to look like the 'caller' */
if (pc == ((unsigned long) &ret_from_exception & ~1)) {
printk("EXCEPTION\n");
unwind_nested(pc,fp);
return;
}
if (pc == ((unsigned long) &ret_from_irq & ~1)) {
printk("IRQ\n");
unwind_nested(pc,fp);
return;
}
cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
((pc & 3) == 0) && ((fp & 7) == 0));
pc -= ofs;
printk("[<%08lx>] ", pc);
print_symbol("%s\n", pc);
if (first_pass) {
/* If the innermost frame is a leaf function, it's
* possible that r18 is never saved out to the stack.
*/
next_pc = regs->regs[18];
} else {
next_pc = 0;
}
if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
ofs = sizeof(unsigned long);
pc = next_pc & ~1;
fp = next_fp;
} else {
printk("Unable to lookup previous stack frame\n");
break;
}
first_pass = 0;
}
printk("\n");
}
void sh64_unwind(struct pt_regs *regs)
{
if (!regs) {
/*
* Fetch current regs if we have no other saved state to back
* trace from.
*/
regs = &here_regs;
__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
__asm__ __volatile__ (
"pta 0f, tr0\n\t"
"blink tr0, %0\n\t"
"0: nop"
: "=r" (regs->pc)
);
}
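/* The pta/blink pair above is in effect a "read the current PC" sequence:
blink branches to the local label 0 and deposits the return address (the
address of the instruction after the branch) in the destination register,
which is close enough to the current PC for backtracing. */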
printk("\nCall Trace:\n");
sh64_unwind_inner(regs);
}

181
arch/sh64/kernel/vmlinux.lds.S Normal file

@@ -0,0 +1,181 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh5/vmlinux.lds.S
*
* ld script to make ST50 Linux kernel
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
* benedict.gaster@superh.com: 2nd May 2002
* Add definition of empty_zero_page to be the first page of kernel image.
*
* benedict.gaster@superh.com: 3rd May 2002
* Added support for ramdisk, removing statically linked romfs at the same time.
*
* lethal@linux-sh.org: 9th May 2003
* Kill off GLOBAL_NAME() usage and other CDC-isms.
*
* lethal@linux-sh.org: 19th May 2003
* Remove support for ancient toolchains.
*/
#include <linux/config.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#define LOAD_OFFSET CONFIG_CACHED_MEMORY_OFFSET
#include <asm-generic/vmlinux.lds.h>
#ifdef NOTDEF
#ifdef CONFIG_LITTLE_ENDIAN
OUTPUT_FORMAT("elf32-sh64l-linux", "elf32-sh64l-linux", "elf32-sh64l-linux")
#else
OUTPUT_FORMAT("elf32-sh64", "elf32-sh64", "elf32-sh64")
#endif
#endif
OUTPUT_ARCH(sh:sh5)
#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET)
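/* C_PHYS gives each section a load address (LMA) equal to its physical
address while its link address (VMA) stays in the cached region: a section
linked at CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START + x is loaded at
CONFIG_MEMORY_START + x. (Illustrative; the actual values come from the board
Kconfig.) */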
ENTRY(__start)
SECTIONS
{
. = CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
_text = .; /* Text and read-only data */
text = .; /* Text and read-only data */
.empty_zero_page : C_PHYS(.empty_zero_page) {
*(.empty_zero_page)
} = 0
.text : C_PHYS(.text) {
*(.text)
*(.text64)
*(.text..SHmedia32)
SCHED_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
#ifdef CONFIG_LITTLE_ENDIAN
} = 0x6ff0fff0
#else
} = 0xf0fff06f
#endif
/* We likely want __ex_table to be Cache Line aligned */
. = ALIGN(L1_CACHE_BYTES); /* Exception table */
__start___ex_table = .;
__ex_table : C_PHYS(__ex_table) { *(__ex_table) }
__stop___ex_table = .;
RODATA
_etext = .; /* End of text section */
.data : C_PHYS(.data) { /* Data */
*(.data)
CONSTRUCTORS
}
. = ALIGN(PAGE_SIZE);
.data.page_aligned : C_PHYS(.data.page_aligned) { *(.data.page_aligned) }
. = ALIGN(L1_CACHE_BYTES);
__per_cpu_start = .;
.data.percpu : C_PHYS(.data.percpu) { *(.data.percpu) }
__per_cpu_end = . ;
.data.cacheline_aligned : C_PHYS(.data.cacheline_aligned) { *(.data.cacheline_aligned) }
_edata = .; /* End of data section */
. = ALIGN(THREAD_SIZE); /* init_task: structure size aligned */
.data.init_task : C_PHYS(.data.init_task) { *(.data.init_task) }
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
_sinittext = .;
.init.text : C_PHYS(.init.text) { *(.init.text) }
_einittext = .;
.init.data : C_PHYS(.init.data) { *(.init.data) }
. = ALIGN(L1_CACHE_BYTES); /* Better if Cache Line aligned */
__setup_start = .;
.init.setup : C_PHYS(.init.setup) { *(.init.setup) }
__setup_end = .;
__initcall_start = .;
.initcall.init : C_PHYS(.initcall.init) {
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
*(.initcall4.init)
*(.initcall5.init)
*(.initcall6.init)
*(.initcall7.init)
}
__initcall_end = .;
__con_initcall_start = .;
.con_initcall.init : C_PHYS(.con_initcall.init) { *(.con_initcall.init) }
__con_initcall_end = .;
SECURITY_INIT
__initramfs_start = .;
.init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) }
__initramfs_end = .;
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* Align to the biggest single data representation, head and tail */
. = ALIGN(8);
__bss_start = .; /* BSS */
.bss : C_PHYS(.bss) {
*(.bss)
}
. = ALIGN(8);
_end = . ;
/* Sections to be discarded */
/DISCARD/ : {
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
}
/* Stabs debugging sections. */
.stab 0 : C_PHYS(.stab) { *(.stab) }
.stabstr 0 : C_PHYS(.stabstr) { *(.stabstr) }
.stab.excl 0 : C_PHYS(.stab.excl) { *(.stab.excl) }
.stab.exclstr 0 : C_PHYS(.stab.exclstr) { *(.stab.exclstr) }
.stab.index 0 : C_PHYS(.stab.index) { *(.stab.index) }
.stab.indexstr 0 : C_PHYS(.stab.indexstr) { *(.stab.indexstr) }
.comment 0 : C_PHYS(.comment) { *(.comment) }
/* DWARF debug sections.
Symbols in the DWARF debugging section are relative to the beginning
of the section so we begin .debug at 0. */
/* DWARF 1 */
.debug 0 : C_PHYS(.debug) { *(.debug) }
.line 0 : C_PHYS(.line) { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : C_PHYS(.debug_srcinfo) { *(.debug_srcinfo) }
.debug_sfnames 0 : C_PHYS(.debug_sfnames) { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : C_PHYS(.debug_aranges) { *(.debug_aranges) }
.debug_pubnames 0 : C_PHYS(.debug_pubnames) { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : C_PHYS(.debug_info) { *(.debug_info) }
.debug_abbrev 0 : C_PHYS(.debug_abbrev) { *(.debug_abbrev) }
.debug_line 0 : C_PHYS(.debug_line) { *(.debug_line) }
.debug_frame 0 : C_PHYS(.debug_frame) { *(.debug_frame) }
.debug_str 0 : C_PHYS(.debug_str) { *(.debug_str) }
.debug_loc 0 : C_PHYS(.debug_loc) { *(.debug_loc) }
.debug_macinfo 0 : C_PHYS(.debug_macinfo) { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : C_PHYS(.debug_weaknames) { *(.debug_weaknames) }
.debug_funcnames 0 : C_PHYS(.debug_funcnames) { *(.debug_funcnames) }
.debug_typenames 0 : C_PHYS(.debug_typenames) { *(.debug_typenames) }
.debug_varnames 0 : C_PHYS(.debug_varnames) { *(.debug_varnames) }
/* These must appear regardless of . */
}