Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (112 commits)
  sh: Move SH-4 CPU headers down one more level.
  sh: Only build in gpio.o when CONFIG_GENERIC_GPIO is selected.
  sh: Migrate common board headers to mach-common/.
  sh: Move the CPU definition headers from asm/ to cpu/.
  serial: sh-sci: Add support SCIF of SH7723
  video: add sh_mobile_lcdc platform flags
  video: remove unused sh_mobile_lcdc platform data
  sh: remove consistent alloc cruft
  sh: add dynamic crash base address support
  sh: reduce Migo-R smc91x overruns
  sh: Fix up some merge damage.
  Fix debugfs_create_file's error checking method for arch/sh/mm/
  Fix debugfs_create_dir's error checking method for arch/sh/kernel/
  sh: ap325rxa: Add support RTC RX-8564LC in AP325RXA board
  sh: Use sh7720 GPIO on magicpanelr2 board
  sh: Add sh7720 pinmux code
  sh: Use sh7203 GPIO on rsk7203 board
  sh: Add sh7203 pinmux code
  sh: Use sh7723 GPIO on AP325RXA board
  sh: Add sh7723 pinmux code
  ...
@@ -21,7 +21,8 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_ELF_CORE) += dump_task.o
obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_GENERIC_GPIO) += gpio.o

EXTRA_CFLAGS += -Werror
@@ -17,7 +17,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_BINFMT_ELF) += dump_task.o
obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
obj-$(CONFIG_GENERIC_GPIO) += gpio.o

EXTRA_CFLAGS += -Werror
@@ -294,9 +294,10 @@ arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

void __init __attribute__ ((weak))
int __init __attribute__ ((weak))
arch_clk_init(void)
{
return 0;
}

static int show_clocks(char *buf, char **start, off_t off,
@@ -331,7 +332,7 @@ int __init clk_init(void)
ret |= clk_register(clk);
}

arch_clk_init();
ret |= arch_clk_init();

/* Kick the child clocks.. */
propagate_rate(&master_clk);
@@ -1,8 +1,6 @@
#
# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
obj-y += intc.o

obj-$(CONFIG_SUPERH32) += imask.o
obj-$(CONFIG_CPU_SH5) += intc-sh5.o
obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
@@ -1,710 +0,0 @@
|
||||
/*
|
||||
* Shared interrupt handling code for IPR and INTC2 types of IRQs.
|
||||
*
|
||||
* Copyright (C) 2007, 2008 Magnus Damm
|
||||
*
|
||||
* Based on intc2.c and ipr.c
|
||||
*
|
||||
* Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
|
||||
* Copyright (C) 2000 Kazumoto Kojima
|
||||
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
|
||||
* Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
|
||||
* Copyright (C) 2005, 2006 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/bootmem.h>
|
||||
|
||||
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
|
||||
((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
|
||||
((addr_e) << 16) | ((addr_d << 24)))
|
||||
|
||||
#define _INTC_SHIFT(h) (h & 0x1f)
|
||||
#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
|
||||
#define _INTC_FN(h) ((h >> 9) & 0xf)
|
||||
#define _INTC_MODE(h) ((h >> 13) & 0x7)
|
||||
#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
|
||||
#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)
|
||||
|
||||
struct intc_handle_int {
|
||||
unsigned int irq;
|
||||
unsigned long handle;
|
||||
};
|
||||
|
||||
struct intc_desc_int {
|
||||
unsigned long *reg;
|
||||
#ifdef CONFIG_SMP
|
||||
unsigned long *smp;
|
||||
#endif
|
||||
unsigned int nr_reg;
|
||||
struct intc_handle_int *prio;
|
||||
unsigned int nr_prio;
|
||||
struct intc_handle_int *sense;
|
||||
unsigned int nr_sense;
|
||||
struct irq_chip chip;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
#define IS_SMP(x) x.smp
|
||||
#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
|
||||
#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
|
||||
#else
|
||||
#define IS_SMP(x) 0
|
||||
#define INTC_REG(d, x, c) (d->reg[(x)])
|
||||
#define SMP_NR(d, x) 1
|
||||
#endif
|
||||
|
||||
static unsigned int intc_prio_level[NR_IRQS]; /* for now */
|
||||
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
|
||||
static unsigned long ack_handle[NR_IRQS];
|
||||
#endif
|
||||
|
||||
static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
|
||||
{
|
||||
struct irq_chip *chip = get_irq_chip(irq);
|
||||
return (void *)((char *)chip - offsetof(struct intc_desc_int, chip));
|
||||
}
|
||||
|
||||
static inline unsigned int set_field(unsigned int value,
|
||||
unsigned int field_value,
|
||||
unsigned int handle)
|
||||
{
|
||||
unsigned int width = _INTC_WIDTH(handle);
|
||||
unsigned int shift = _INTC_SHIFT(handle);
|
||||
|
||||
value &= ~(((1 << width) - 1) << shift);
|
||||
value |= field_value << shift;
|
||||
return value;
|
||||
}
|
||||
|
||||
static void write_8(unsigned long addr, unsigned long h, unsigned long data)
|
||||
{
|
||||
ctrl_outb(set_field(0, data, h), addr);
|
||||
}
|
||||
|
||||
static void write_16(unsigned long addr, unsigned long h, unsigned long data)
|
||||
{
|
||||
ctrl_outw(set_field(0, data, h), addr);
|
||||
}
|
||||
|
||||
static void write_32(unsigned long addr, unsigned long h, unsigned long data)
|
||||
{
|
||||
ctrl_outl(set_field(0, data, h), addr);
|
||||
}
|
||||
|
||||
static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
|
||||
{
|
||||
unsigned long flags;
|
||||
local_irq_save(flags);
|
||||
ctrl_outb(set_field(ctrl_inb(addr), data, h), addr);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
|
||||
{
|
||||
unsigned long flags;
|
||||
local_irq_save(flags);
|
||||
ctrl_outw(set_field(ctrl_inw(addr), data, h), addr);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
|
||||
{
|
||||
unsigned long flags;
|
||||
local_irq_save(flags);
|
||||
ctrl_outl(set_field(ctrl_inl(addr), data, h), addr);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };
|
||||
|
||||
static void (*intc_reg_fns[])(unsigned long addr,
|
||||
unsigned long h,
|
||||
unsigned long data) = {
|
||||
[REG_FN_WRITE_BASE + 0] = write_8,
|
||||
[REG_FN_WRITE_BASE + 1] = write_16,
|
||||
[REG_FN_WRITE_BASE + 3] = write_32,
|
||||
[REG_FN_MODIFY_BASE + 0] = modify_8,
|
||||
[REG_FN_MODIFY_BASE + 1] = modify_16,
|
||||
[REG_FN_MODIFY_BASE + 3] = modify_32,
|
||||
};
|
||||
|
||||
enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
|
||||
MODE_MASK_REG, /* Bit(s) set -> interrupt disabled */
|
||||
MODE_DUAL_REG, /* Two registers, set bit to enable / disable */
|
||||
MODE_PRIO_REG, /* Priority value written to enable interrupt */
|
||||
MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */
|
||||
};
|
||||
|
||||
static void intc_mode_field(unsigned long addr,
|
||||
unsigned long handle,
|
||||
void (*fn)(unsigned long,
|
||||
unsigned long,
|
||||
unsigned long),
|
||||
unsigned int irq)
|
||||
{
|
||||
fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
|
||||
}
|
||||
|
||||
static void intc_mode_zero(unsigned long addr,
|
||||
unsigned long handle,
|
||||
void (*fn)(unsigned long,
|
||||
unsigned long,
|
||||
unsigned long),
|
||||
unsigned int irq)
|
||||
{
|
||||
fn(addr, handle, 0);
|
||||
}
|
||||
|
||||
static void intc_mode_prio(unsigned long addr,
|
||||
unsigned long handle,
|
||||
void (*fn)(unsigned long,
|
||||
unsigned long,
|
||||
unsigned long),
|
||||
unsigned int irq)
|
||||
{
|
||||
fn(addr, handle, intc_prio_level[irq]);
|
||||
}
|
||||
|
||||
static void (*intc_enable_fns[])(unsigned long addr,
|
||||
unsigned long handle,
|
||||
void (*fn)(unsigned long,
|
||||
unsigned long,
|
||||
unsigned long),
|
||||
unsigned int irq) = {
|
||||
[MODE_ENABLE_REG] = intc_mode_field,
|
||||
[MODE_MASK_REG] = intc_mode_zero,
|
||||
[MODE_DUAL_REG] = intc_mode_field,
|
||||
[MODE_PRIO_REG] = intc_mode_prio,
|
||||
[MODE_PCLR_REG] = intc_mode_prio,
|
||||
};
|
||||
|
||||
static void (*intc_disable_fns[])(unsigned long addr,
|
||||
unsigned long handle,
|
||||
void (*fn)(unsigned long,
|
||||
unsigned long,
|
||||
unsigned long),
|
||||
unsigned int irq) = {
|
||||
[MODE_ENABLE_REG] = intc_mode_zero,
|
||||
[MODE_MASK_REG] = intc_mode_field,
|
||||
[MODE_DUAL_REG] = intc_mode_field,
|
||||
[MODE_PRIO_REG] = intc_mode_zero,
|
||||
[MODE_PCLR_REG] = intc_mode_field,
|
||||
};
|
||||
|
||||
static inline void _intc_enable(unsigned int irq, unsigned long handle)
|
||||
{
|
||||
struct intc_desc_int *d = get_intc_desc(irq);
|
||||
unsigned long addr;
|
||||
unsigned int cpu;
|
||||
|
||||
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
|
||||
addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
|
||||
intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
|
||||
[_INTC_FN(handle)], irq);
|
||||
}
|
||||
}
|
||||
|
||||
static void intc_enable(unsigned int irq)
|
||||
{
|
||||
_intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
|
||||
}
|
||||
|
||||
static void intc_disable(unsigned int irq)
|
||||
{
|
||||
struct intc_desc_int *d = get_intc_desc(irq);
|
||||
unsigned long handle = (unsigned long) get_irq_chip_data(irq);
|
||||
unsigned long addr;
|
||||
unsigned int cpu;
|
||||
|
||||
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
|
||||
addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
|
||||
intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
|
||||
[_INTC_FN(handle)], irq);
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
|
||||
static void intc_mask_ack(unsigned int irq)
|
||||
{
|
||||
struct intc_desc_int *d = get_intc_desc(irq);
|
||||
unsigned long handle = ack_handle[irq];
|
||||
unsigned long addr;
|
||||
|
||||
intc_disable(irq);
|
||||
|
||||
/* read register and write zero only to the associated bit */
|
||||
|
||||
if (handle) {
|
||||
addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
|
||||
switch (_INTC_FN(handle)) {
|
||||
case REG_FN_MODIFY_BASE + 0: /* 8bit */
|
||||
ctrl_inb(addr);
|
||||
ctrl_outb(0xff ^ set_field(0, 1, handle), addr);
|
||||
break;
|
||||
case REG_FN_MODIFY_BASE + 1: /* 16bit */
|
||||
ctrl_inw(addr);
|
||||
ctrl_outw(0xffff ^ set_field(0, 1, handle), addr);
|
||||
break;
|
||||
case REG_FN_MODIFY_BASE + 3: /* 32bit */
|
||||
ctrl_inl(addr);
|
||||
ctrl_outl(0xffffffff ^ set_field(0, 1, handle), addr);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
|
||||
unsigned int nr_hp,
|
||||
unsigned int irq)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* this doesn't scale well, but...
|
||||
*
|
||||
* this function should only be used for certain uncommon
|
||||
* operations such as intc_set_priority() and intc_set_sense()
|
||||
* and in those rare cases performance doesn't matter that much.
|
||||
* keeping the memory footprint low is more important.
|
||||
*
|
||||
* one rather simple way to speed this up and still keep the
|
||||
* memory footprint down is to make sure the array is sorted
|
||||
* and then perform a bisect to lookup the irq.
|
||||
*/
|
||||
|
||||
for (i = 0; i < nr_hp; i++) {
|
||||
if ((hp + i)->irq != irq)
|
||||
continue;
|
||||
|
||||
return hp + i;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int intc_set_priority(unsigned int irq, unsigned int prio)
|
||||
{
|
||||
struct intc_desc_int *d = get_intc_desc(irq);
|
||||
struct intc_handle_int *ihp;
|
||||
|
||||
if (!intc_prio_level[irq] || prio <= 1)
|
||||
return -EINVAL;
|
||||
|
||||
ihp = intc_find_irq(d->prio, d->nr_prio, irq);
|
||||
if (ihp) {
|
||||
if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
|
||||
return -EINVAL;
|
||||
|
||||
intc_prio_level[irq] = prio;
|
||||
|
||||
/*
|
||||
* only set secondary masking method directly
|
||||
* primary masking method is using intc_prio_level[irq]
|
||||
* priority level will be set during next enable()
|
||||
*/
|
||||
|
||||
if (_INTC_FN(ihp->handle) != REG_FN_ERR)
|
||||
_intc_enable(irq, ihp->handle);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define VALID(x) (x | 0x80)
|
||||
|
||||
static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
|
||||
[IRQ_TYPE_EDGE_FALLING] = VALID(0),
|
||||
[IRQ_TYPE_EDGE_RISING] = VALID(1),
|
||||
[IRQ_TYPE_LEVEL_LOW] = VALID(2),
|
||||
/* SH7706, SH7707 and SH7709 do not support high level triggered */
|
||||
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
|
||||
!defined(CONFIG_CPU_SUBTYPE_SH7707) && \
|
||||
!defined(CONFIG_CPU_SUBTYPE_SH7709)
|
||||
[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
|
||||
#endif
|
||||
};
|
||||
|
||||
static int intc_set_sense(unsigned int irq, unsigned int type)
|
||||
{
|
||||
struct intc_desc_int *d = get_intc_desc(irq);
|
||||
unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
|
||||
struct intc_handle_int *ihp;
|
||||
unsigned long addr;
|
||||
|
||||
if (!value)
|
||||
return -EINVAL;
|
||||
|
||||
ihp = intc_find_irq(d->sense, d->nr_sense, irq);
|
||||
if (ihp) {
|
||||
addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
|
||||
intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int __init intc_get_reg(struct intc_desc_int *d,
|
||||
unsigned long address)
|
||||
{
|
||||
unsigned int k;
|
||||
|
||||
for (k = 0; k < d->nr_reg; k++) {
|
||||
if (d->reg[k] == address)
|
||||
return k;
|
||||
}
|
||||
|
||||
BUG();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static intc_enum __init intc_grp_id(struct intc_desc *desc,
|
||||
intc_enum enum_id)
|
||||
{
|
||||
struct intc_group *g = desc->groups;
|
||||
unsigned int i, j;
|
||||
|
||||
for (i = 0; g && enum_id && i < desc->nr_groups; i++) {
|
||||
g = desc->groups + i;
|
||||
|
||||
for (j = 0; g->enum_ids[j]; j++) {
|
||||
if (g->enum_ids[j] != enum_id)
|
||||
continue;
|
||||
|
||||
return g->enum_id;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int __init intc_mask_data(struct intc_desc *desc,
|
||||
struct intc_desc_int *d,
|
||||
intc_enum enum_id, int do_grps)
|
||||
{
|
||||
struct intc_mask_reg *mr = desc->mask_regs;
|
||||
unsigned int i, j, fn, mode;
|
||||
unsigned long reg_e, reg_d;
|
||||
|
||||
for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) {
|
||||
mr = desc->mask_regs + i;
|
||||
|
||||
for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
|
||||
if (mr->enum_ids[j] != enum_id)
|
||||
continue;
|
||||
|
||||
if (mr->set_reg && mr->clr_reg) {
|
||||
fn = REG_FN_WRITE_BASE;
|
||||
mode = MODE_DUAL_REG;
|
||||
reg_e = mr->clr_reg;
|
||||
reg_d = mr->set_reg;
|
||||
} else {
|
||||
fn = REG_FN_MODIFY_BASE;
|
||||
if (mr->set_reg) {
|
||||
mode = MODE_ENABLE_REG;
|
||||
reg_e = mr->set_reg;
|
||||
reg_d = mr->set_reg;
|
||||
} else {
|
||||
mode = MODE_MASK_REG;
|
||||
reg_e = mr->clr_reg;
|
||||
reg_d = mr->clr_reg;
|
||||
}
|
||||
}
|
||||
|
||||
fn += (mr->reg_width >> 3) - 1;
|
||||
return _INTC_MK(fn, mode,
|
||||
intc_get_reg(d, reg_e),
|
||||
intc_get_reg(d, reg_d),
|
||||
1,
|
||||
(mr->reg_width - 1) - j);
|
||||
}
|
||||
}
|
||||
|
||||
if (do_grps)
|
||||
return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int __init intc_prio_data(struct intc_desc *desc,
|
||||
struct intc_desc_int *d,
|
||||
intc_enum enum_id, int do_grps)
|
||||
{
|
||||
struct intc_prio_reg *pr = desc->prio_regs;
|
||||
unsigned int i, j, fn, mode, bit;
|
||||
unsigned long reg_e, reg_d;
|
||||
|
||||
for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) {
|
||||
pr = desc->prio_regs + i;
|
||||
|
||||
for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) {
|
||||
if (pr->enum_ids[j] != enum_id)
|
||||
continue;
|
||||
|
||||
if (pr->set_reg && pr->clr_reg) {
|
||||
fn = REG_FN_WRITE_BASE;
|
||||
mode = MODE_PCLR_REG;
|
||||
reg_e = pr->set_reg;
|
||||
reg_d = pr->clr_reg;
|
||||
} else {
|
||||
fn = REG_FN_MODIFY_BASE;
|
||||
mode = MODE_PRIO_REG;
|
||||
if (!pr->set_reg)
|
||||
BUG();
|
||||
reg_e = pr->set_reg;
|
||||
reg_d = pr->set_reg;
|
||||
}
|
||||
|
||||
fn += (pr->reg_width >> 3) - 1;
|
||||
bit = pr->reg_width - ((j + 1) * pr->field_width);
|
||||
|
||||
BUG_ON(bit < 0);
|
||||
|
||||
return _INTC_MK(fn, mode,
|
||||
intc_get_reg(d, reg_e),
|
||||
intc_get_reg(d, reg_d),
|
||||
pr->field_width, bit);
|
||||
}
|
||||
}
|
||||
|
||||
if (do_grps)
|
||||
return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
|
||||
static unsigned int __init intc_ack_data(struct intc_desc *desc,
|
||||
struct intc_desc_int *d,
|
||||
intc_enum enum_id)
|
||||
{
|
||||
struct intc_mask_reg *mr = desc->ack_regs;
|
||||
unsigned int i, j, fn, mode;
|
||||
unsigned long reg_e, reg_d;
|
||||
|
||||
for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) {
|
||||
mr = desc->ack_regs + i;
|
||||
|
||||
for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
|
||||
if (mr->enum_ids[j] != enum_id)
|
||||
continue;
|
||||
|
||||
fn = REG_FN_MODIFY_BASE;
|
||||
mode = MODE_ENABLE_REG;
|
||||
reg_e = mr->set_reg;
|
||||
reg_d = mr->set_reg;
|
||||
|
||||
fn += (mr->reg_width >> 3) - 1;
|
||||
return _INTC_MK(fn, mode,
|
||||
intc_get_reg(d, reg_e),
|
||||
intc_get_reg(d, reg_d),
|
||||
1,
|
||||
(mr->reg_width - 1) - j);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static unsigned int __init intc_sense_data(struct intc_desc *desc,
|
||||
struct intc_desc_int *d,
|
||||
intc_enum enum_id)
|
||||
{
|
||||
struct intc_sense_reg *sr = desc->sense_regs;
|
||||
unsigned int i, j, fn, bit;
|
||||
|
||||
for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) {
|
||||
sr = desc->sense_regs + i;
|
||||
|
||||
for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
|
||||
if (sr->enum_ids[j] != enum_id)
|
||||
continue;
|
||||
|
||||
fn = REG_FN_MODIFY_BASE;
|
||||
fn += (sr->reg_width >> 3) - 1;
|
||||
bit = sr->reg_width - ((j + 1) * sr->field_width);
|
||||
|
||||
BUG_ON(bit < 0);
|
||||
|
||||
return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
|
||||
0, sr->field_width, bit);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init intc_register_irq(struct intc_desc *desc,
|
||||
struct intc_desc_int *d,
|
||||
intc_enum enum_id,
|
||||
unsigned int irq)
|
||||
{
|
||||
struct intc_handle_int *hp;
|
||||
unsigned int data[2], primary;
|
||||
|
||||
/* Prefer single interrupt source bitmap over other combinations:
|
||||
* 1. bitmap, single interrupt source
|
||||
* 2. priority, single interrupt source
|
||||
* 3. bitmap, multiple interrupt sources (groups)
|
||||
* 4. priority, multiple interrupt sources (groups)
|
||||
*/
|
||||
|
||||
data[0] = intc_mask_data(desc, d, enum_id, 0);
|
||||
data[1] = intc_prio_data(desc, d, enum_id, 0);
|
||||
|
||||
primary = 0;
|
||||
if (!data[0] && data[1])
|
||||
primary = 1;
|
||||
|
||||
data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
|
||||
data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);
|
||||
|
||||
if (!data[primary])
|
||||
primary ^= 1;
|
||||
|
||||
BUG_ON(!data[primary]); /* must have primary masking method */
|
||||
|
||||
disable_irq_nosync(irq);
|
||||
set_irq_chip_and_handler_name(irq, &d->chip,
|
||||
handle_level_irq, "level");
|
||||
set_irq_chip_data(irq, (void *)data[primary]);
|
||||
|
||||
/* set priority level
|
||||
* - this needs to be at least 2 for 5-bit priorities on 7780
|
||||
*/
|
||||
intc_prio_level[irq] = 2;
|
||||
|
||||
/* enable secondary masking method if present */
|
||||
if (data[!primary])
|
||||
_intc_enable(irq, data[!primary]);
|
||||
|
||||
/* add irq to d->prio list if priority is available */
|
||||
if (data[1]) {
|
||||
hp = d->prio + d->nr_prio;
|
||||
hp->irq = irq;
|
||||
hp->handle = data[1];
|
||||
|
||||
if (primary) {
|
||||
/*
|
||||
* only secondary priority should access registers, so
|
||||
* set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
|
||||
*/
|
||||
|
||||
hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
|
||||
hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
|
||||
}
|
||||
d->nr_prio++;
|
||||
}
|
||||
|
||||
/* add irq to d->sense list if sense is available */
|
||||
data[0] = intc_sense_data(desc, d, enum_id);
|
||||
if (data[0]) {
|
||||
(d->sense + d->nr_sense)->irq = irq;
|
||||
(d->sense + d->nr_sense)->handle = data[0];
|
||||
d->nr_sense++;
|
||||
}
|
||||
|
||||
/* irq should be disabled by default */
|
||||
d->chip.mask(irq);
|
||||
|
||||
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
|
||||
if (desc->ack_regs)
|
||||
ack_handle[irq] = intc_ack_data(desc, d, enum_id);
|
||||
#endif
|
||||
}
|
||||
|
||||
static unsigned int __init save_reg(struct intc_desc_int *d,
|
||||
unsigned int cnt,
|
||||
unsigned long value,
|
||||
unsigned int smp)
|
||||
{
|
||||
if (value) {
|
||||
d->reg[cnt] = value;
|
||||
#ifdef CONFIG_SMP
|
||||
d->smp[cnt] = smp;
|
||||
#endif
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void __init register_intc_controller(struct intc_desc *desc)
|
||||
{
|
||||
unsigned int i, k, smp;
|
||||
struct intc_desc_int *d;
|
||||
|
||||
d = alloc_bootmem(sizeof(*d));
|
||||
|
||||
d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0;
|
||||
d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0;
|
||||
d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0;
|
||||
|
||||
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
|
||||
d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
|
||||
#endif
|
||||
d->reg = alloc_bootmem(d->nr_reg * sizeof(*d->reg));
|
||||
#ifdef CONFIG_SMP
|
||||
d->smp = alloc_bootmem(d->nr_reg * sizeof(*d->smp));
|
||||
#endif
|
||||
k = 0;
|
||||
|
||||
if (desc->mask_regs) {
|
||||
for (i = 0; i < desc->nr_mask_regs; i++) {
|
||||
smp = IS_SMP(desc->mask_regs[i]);
|
||||
k += save_reg(d, k, desc->mask_regs[i].set_reg, smp);
|
||||
k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp);
|
||||
}
|
||||
}
|
||||
|
||||
if (desc->prio_regs) {
|
||||
d->prio = alloc_bootmem(desc->nr_vectors * sizeof(*d->prio));
|
||||
|
||||
for (i = 0; i < desc->nr_prio_regs; i++) {
|
||||
smp = IS_SMP(desc->prio_regs[i]);
|
||||
k += save_reg(d, k, desc->prio_regs[i].set_reg, smp);
|
||||
k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp);
|
||||
}
|
||||
}
|
||||
|
||||
if (desc->sense_regs) {
|
||||
d->sense = alloc_bootmem(desc->nr_vectors * sizeof(*d->sense));
|
||||
|
||||
for (i = 0; i < desc->nr_sense_regs; i++) {
|
||||
k += save_reg(d, k, desc->sense_regs[i].reg, 0);
|
||||
}
|
||||
}
|
||||
|
||||
d->chip.name = desc->name;
|
||||
d->chip.mask = intc_disable;
|
||||
d->chip.unmask = intc_enable;
|
||||
d->chip.mask_ack = intc_disable;
|
||||
d->chip.set_type = intc_set_sense;
|
||||
|
||||
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
|
||||
if (desc->ack_regs) {
|
||||
for (i = 0; i < desc->nr_ack_regs; i++)
|
||||
k += save_reg(d, k, desc->ack_regs[i].set_reg, 0);
|
||||
|
||||
d->chip.mask_ack = intc_mask_ack;
|
||||
}
|
||||
#endif
|
||||
|
||||
BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
|
||||
|
||||
for (i = 0; i < desc->nr_vectors; i++) {
|
||||
struct intc_vect *vect = desc->vectors + i;
|
||||
|
||||
intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect));
|
||||
}
|
||||
}
|
@@ -33,7 +33,7 @@ static void disable_ipr_irq(unsigned int irq)
struct ipr_data *p = get_irq_chip_data(irq);
unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
/* Set the priority in IPR to 0 */
ctrl_outw(ctrl_inw(addr) & (0xffff ^ (0xf << p->shift)), addr);
__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
}

static void enable_ipr_irq(unsigned int irq)
@@ -41,7 +41,7 @@ static void enable_ipr_irq(unsigned int irq)
struct ipr_data *p = get_irq_chip_data(irq);
unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
/* Set priority in IPR back to original value */
ctrl_outw(ctrl_inw(addr) | (p->priority << p->shift), addr);
__raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
}

/*
@@ -12,3 +12,8 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
obj-$(CONFIG_CPU_SUBTYPE_SH7203) += setup-sh7203.o clock-sh7203.o
obj-$(CONFIG_CPU_SUBTYPE_SH7263) += setup-sh7203.o clock-sh7203.o
obj-$(CONFIG_CPU_SUBTYPE_MXG) += setup-mxg.o clock-sh7206.o

# Pinmux setup
pinmux-$(CONFIG_CPU_SUBTYPE_SH7203) := pinmux-sh7203.o

obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c | 1599 lines (new file) - file diff suppressed because it is too large
@@ -24,4 +24,8 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7710) := clock-sh7710.o
clock-$(CONFIG_CPU_SUBTYPE_SH7720) := clock-sh7710.o
clock-$(CONFIG_CPU_SUBTYPE_SH7712) := clock-sh7712.o

# Pinmux setup
pinmux-$(CONFIG_CPU_SUBTYPE_SH7720) := pinmux-sh7720.o

obj-y += $(clock-y)
obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
arch/sh/kernel/cpu/sh3/pinmux-sh7720.c | 1242 lines (new file) - file diff suppressed because it is too large
@@ -36,7 +36,7 @@ extern unsigned long int float32_add(unsigned long int a, unsigned long int b);
extern unsigned long long float64_sub(unsigned long long a,
unsigned long long b);
extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);

extern unsigned long int float64_to_float32(unsigned long long a);
static unsigned int fpu_exception_flags;

/*
@@ -415,6 +415,29 @@ static int ieee_fpe_handler(struct pt_regs *regs)
} else
return 0;

regs->pc = nextpc;
return 1;
} else if ((finsn & 0xf0bd) == 0xf0bd) {
/* fcnvds - double to single precision convert */
struct task_struct *tsk = current;
int m;
unsigned int hx;

m = (finsn >> 9) & 0x7;
hx = tsk->thread.fpu.hard.fp_regs[m];

if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)
&& ((hx & 0x7fffffff) < 0x00100000)) {
/* subnormal double to float conversion */
long long llx;

llx = ((long long)tsk->thread.fpu.hard.fp_regs[m] << 32)
| tsk->thread.fpu.hard.fp_regs[m + 1];

tsk->thread.fpu.hard.fpul = float64_to_float32(llx);
} else
return 0;

regs->pc = nextpc;
return 1;
}
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>

enum {
UNUSED = 0,
@@ -178,10 +179,14 @@ static int __init sh7760_devices_setup(void)
}
__initcall(sh7760_devices_setup);

#define INTC_ICR 0xffd00000UL
#define INTC_ICR_IRLM (1 << 7)

void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ:
ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
register_intc_controller(&intc_desc_irq);
break;
default:
@@ -85,6 +85,7 @@ float64 float64_div(float64 a, float64 b);
float32 float32_div(float32 a, float32 b);
float32 float32_mul(float32 a, float32 b);
float64 float64_mul(float64 a, float64 b);
float32 float64_to_float32(float64 a);
inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 * z1Ptr);
inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
@@ -890,3 +891,31 @@ float64 float64_mul(float64 a, float64 b)
}
return roundAndPackFloat64(zSign, zExp, zSig0);
}

/*
* -------------------------------------------------------------------------------
* Returns the result of converting the double-precision floating-point value
* `a' to the single-precision floating-point format. The conversion is
* performed according to the IEC/IEEE Standard for Binary Floating-point
* Arithmetic.
* -------------------------------------------------------------------------------
* */
float32 float64_to_float32(float64 a)
{
flag aSign;
int16 aExp;
bits64 aSig;
bits32 zSig;

aSig = extractFloat64Frac( a );
aExp = extractFloat64Exp( a );
aSign = extractFloat64Sign( a );

shift64RightJamming( aSig, 22, &aSig );
zSig = aSig;
if ( aExp || zSig ) {
zSig |= 0x40000000;
aExp -= 0x381;
}
return roundAndPackFloat32(aSign, aExp, zSig);
}
@@ -27,5 +27,10 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7722.o
clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7722.o
clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o

# Pinmux setup
pinmux-$(CONFIG_CPU_SUBTYPE_SH7722) := pinmux-sh7722.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o

obj-y += $(clock-y)
obj-$(CONFIG_SMP) += $(smp-y)
obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c | 1783 lines (new file) - file diff suppressed because it is too large
arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c | 1909 lines (new file) - file diff suppressed because it is too large
@@ -1,7 +1,7 @@
/*
* SH-X3 SMP
*
* Copyright (C) 2007 Paul Mundt
* Copyright (C) 2007 - 2008 Paul Mundt
* Copyright (C) 2007 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -14,6 +14,22 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
|
||||
{
|
||||
unsigned int message = (unsigned int)(long)arg;
|
||||
unsigned int cpu = hard_smp_processor_id();
|
||||
unsigned int offs = 4 * cpu;
|
||||
unsigned int x;
|
||||
|
||||
x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
|
||||
x &= (1 << (message << 2));
|
||||
ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
|
||||
|
||||
smp_message_recv(message);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
void __init plat_smp_setup(void)
|
||||
{
|
||||
unsigned int cpu = 0;
|
||||
@@ -40,6 +56,13 @@ void __init plat_smp_setup(void)
|
||||
|
||||
void __init plat_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
int i;
|
||||
|
||||
BUILD_BUG_ON(SMP_MSG_NR >= 8);
|
||||
|
||||
for (i = 0; i < SMP_MSG_NR; i++)
|
||||
request_irq(104 + i, ipi_interrupt_handler, IRQF_DISABLED,
|
||||
"IPI", (void *)(long)i);
|
||||
}
|
||||
|
||||
#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
|
||||
@@ -59,7 +82,7 @@ void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
|
||||
ctrl_outl(STBCR_MSTP, STBCR_REG(cpu));
|
||||
|
||||
while (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP))
|
||||
;
|
||||
cpu_relax();
|
||||
|
||||
/* Start up secondary processor by sending a reset */
|
||||
ctrl_outl(STBCR_AP_VAL, STBCR_REG(cpu));
|
||||
@@ -75,46 +98,6 @@ void plat_send_ipi(unsigned int cpu, unsigned int message)
|
||||
unsigned long addr = 0xfe410070 + (cpu * 4);
|
||||
|
||||
BUG_ON(cpu >= 4);
|
||||
BUG_ON(message >= SMP_MSG_NR);
|
||||
|
||||
ctrl_outl(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
|
||||
}
|
||||
|
||||
struct ipi_data {
|
||||
void (*handler)(void *);
|
||||
void *arg;
|
||||
unsigned int message;
|
||||
};
|
||||
|
||||
static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
|
||||
{
|
||||
struct ipi_data *id = arg;
|
||||
unsigned int cpu = hard_smp_processor_id();
|
||||
unsigned int offs = 4 * cpu;
|
||||
unsigned int x;
|
||||
|
||||
x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
|
||||
x &= (1 << (id->message << 2));
|
||||
ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
|
||||
|
||||
id->handler(id->arg);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static struct ipi_data ipi_handlers[SMP_MSG_NR];
|
||||
|
||||
int plat_register_ipi_handler(unsigned int message,
|
||||
void (*handler)(void *), void *arg)
|
||||
{
|
||||
struct ipi_data *id = &ipi_handlers[message];
|
||||
|
||||
BUG_ON(SMP_MSG_NR >= 8);
|
||||
BUG_ON(message >= SMP_MSG_NR);
|
||||
|
||||
id->handler = handler;
|
||||
id->arg = arg;
|
||||
id->message = message;
|
||||
|
||||
return request_irq(104 + message, ipi_interrupt_handler, 0, "IPI", id);
|
||||
}
|
||||
|
@@ -5,3 +5,8 @@ obj-y := entry.o probe.o switchto.o

obj-$(CONFIG_SH_FPU) += fpu.o
obj-$(CONFIG_KALLSYMS) += unwind.o

# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH5) := clock-sh5.o

obj-y += $(clock-y)
arch/sh/kernel/cpu/sh5/clock-sh5.c | 79 lines (new file)
@@ -0,0 +1,79 @@
|
||||
/*
|
||||
* arch/sh/kernel/cpu/sh5/clock-sh5.c
|
||||
*
|
||||
* SH-5 support for the clock framework
|
||||
*
|
||||
* Copyright (C) 2008 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/clock.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
|
||||
|
||||
/* Clock, Power and Reset Controller */
|
||||
#define CPRC_BLOCK_OFF 0x01010000
|
||||
#define CPRC_BASE (PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
|
||||
|
||||
static unsigned long cprc_base;
|
||||
|
||||
static void master_clk_init(struct clk *clk)
|
||||
{
|
||||
int idx = (ctrl_inl(cprc_base + 0x00) >> 6) & 0x0007;
|
||||
clk->rate *= ifc_table[idx];
|
||||
}
|
||||
|
||||
static struct clk_ops sh5_master_clk_ops = {
|
||||
.init = master_clk_init,
|
||||
};
|
||||
|
||||
static void module_clk_recalc(struct clk *clk)
|
||||
{
|
||||
int idx = (ctrl_inw(cprc_base) >> 12) & 0x0007;
|
||||
clk->rate = clk->parent->rate / ifc_table[idx];
|
||||
}
|
||||
|
||||
static struct clk_ops sh5_module_clk_ops = {
|
||||
.recalc = module_clk_recalc,
|
||||
};
|
||||
|
||||
static void bus_clk_recalc(struct clk *clk)
|
||||
{
|
||||
int idx = (ctrl_inw(cprc_base) >> 3) & 0x0007;
|
||||
clk->rate = clk->parent->rate / ifc_table[idx];
|
||||
}
|
||||
|
||||
static struct clk_ops sh5_bus_clk_ops = {
|
||||
.recalc = bus_clk_recalc,
|
||||
};
|
||||
|
||||
static void cpu_clk_recalc(struct clk *clk)
|
||||
{
|
||||
int idx = (ctrl_inw(cprc_base) & 0x0007);
|
||||
clk->rate = clk->parent->rate / ifc_table[idx];
|
||||
}
|
||||
|
||||
static struct clk_ops sh5_cpu_clk_ops = {
|
||||
.recalc = cpu_clk_recalc,
|
||||
};
|
||||
|
||||
static struct clk_ops *sh5_clk_ops[] = {
|
||||
&sh5_master_clk_ops,
|
||||
&sh5_module_clk_ops,
|
||||
&sh5_bus_clk_ops,
|
||||
&sh5_cpu_clk_ops,
|
||||
};
|
||||
|
||||
void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
|
||||
{
|
||||
cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
|
||||
BUG_ON(!cprc_base);
|
||||
|
||||
if (idx < ARRAY_SIZE(sh5_clk_ops))
|
||||
*ops = sh5_clk_ops[idx];
|
||||
}
|
@@ -1,32 +0,0 @@
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <asm/fpu.h>

/*
* Capture the user space registers if the task is not running (in user space)
*/
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
struct pt_regs ptregs;

ptregs = *task_pt_regs(tsk);
elf_core_copy_regs(regs, &ptregs);

return 1;
}

int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
{
int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
fpvalid = !!tsk_used_math(tsk);
if (fpvalid) {
unlazy_fpu(tsk, task_pt_regs(tsk));
memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
}
#endif

return fpvalid;
}
@@ -371,3 +371,47 @@ syscall_exit:
#endif
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave

#ifdef CONFIG_FTRACE
.align 2
.globl _mcount
.type _mcount,@function
.globl mcount
.type mcount,@function
_mcount:
mcount:
mov.l r4, @-r15
mov.l r5, @-r15
mov.l r6, @-r15
mov.l r7, @-r15
sts.l pr, @-r15

mov.l @(20,r15),r4
sts pr, r5

mov.l 1f, r6
mov.l ftrace_stub, r7
cmp/eq r6, r7
bt skip_trace

mov.l @r6, r6
jsr @r6
nop

skip_trace:

lds.l @r15+, pr
mov.l @r15+, r7
mov.l @r15+, r6
mov.l @r15+, r5
rts
mov.l @r15+, r4

.align 2
1: .long ftrace_trace_function

.globl ftrace_stub
ftrace_stub:
rts
nop
#endif /* CONFIG_FTRACE */
arch/sh/kernel/gpio.c | 498 lines (new file)
@@ -0,0 +1,498 @@
|
||||
/*
|
||||
* Pinmuxed GPIO support for SuperH.
|
||||
*
|
||||
* Copyright (C) 2008 Magnus Damm
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/gpio.h>
|
||||
|
||||
static struct pinmux_info *registered_gpio;
|
||||
|
||||
static struct pinmux_info *gpio_controller(unsigned gpio)
|
||||
{
|
||||
if (!registered_gpio)
|
||||
return NULL;
|
||||
|
||||
if (gpio < registered_gpio->first_gpio)
|
||||
return NULL;
|
||||
|
||||
if (gpio > registered_gpio->last_gpio)
|
||||
return NULL;
|
||||
|
||||
return registered_gpio;
|
||||
}
|
||||
|
||||
static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
|
||||
{
|
||||
if (enum_id < r->begin)
|
||||
return 0;
|
||||
|
||||
if (enum_id > r->end)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int read_write_reg(unsigned long reg, unsigned long reg_width,
|
||||
unsigned long field_width, unsigned long in_pos,
|
||||
unsigned long value, int do_write)
|
||||
{
|
||||
unsigned long data, mask, pos;
|
||||
|
||||
data = 0;
|
||||
mask = (1 << field_width) - 1;
|
||||
pos = reg_width - ((in_pos + 1) * field_width);
|
||||
|
||||
#ifdef DEBUG
|
||||
pr_info("%s, addr = %lx, value = %ld, pos = %ld, "
|
||||
"r_width = %ld, f_width = %ld\n",
|
||||
do_write ? "write" : "read", reg, value, pos,
|
||||
reg_width, field_width);
|
||||
#endif
|
||||
|
||||
switch (reg_width) {
|
||||
case 8:
|
||||
data = ctrl_inb(reg);
|
||||
break;
|
||||
case 16:
|
||||
data = ctrl_inw(reg);
|
||||
break;
|
||||
case 32:
|
||||
data = ctrl_inl(reg);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!do_write)
|
||||
return (data >> pos) & mask;
|
||||
|
||||
data &= ~(mask << pos);
|
||||
data |= value << pos;
|
||||
|
||||
switch (reg_width) {
|
||||
case 8:
|
||||
ctrl_outb(data, reg);
|
||||
break;
|
||||
case 16:
|
||||
ctrl_outw(data, reg);
|
||||
break;
|
||||
case 32:
|
||||
ctrl_outl(data, reg);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio,
|
||||
struct pinmux_data_reg **drp, int *bitp)
|
||||
{
|
||||
pinmux_enum_t enum_id = gpioc->gpios[gpio].enum_id;
|
||||
struct pinmux_data_reg *data_reg;
|
||||
int k, n;
|
||||
|
||||
if (!enum_in_range(enum_id, &gpioc->data))
|
||||
return -1;
|
||||
|
||||
k = 0;
|
||||
while (1) {
|
||||
data_reg = gpioc->data_regs + k;
|
||||
|
||||
if (!data_reg->reg_width)
|
||||
break;
|
||||
|
||||
for (n = 0; n < data_reg->reg_width; n++) {
|
||||
if (data_reg->enum_ids[n] == enum_id) {
|
||||
*drp = data_reg;
|
||||
*bitp = n;
|
||||
return 0;
|
||||
|
||||
}
|
||||
}
|
||||
k++;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
|
||||
struct pinmux_cfg_reg **crp, int *indexp,
|
||||
unsigned long **cntp)
|
||||
{
|
||||
struct pinmux_cfg_reg *config_reg;
|
||||
unsigned long r_width, f_width;
|
||||
int k, n;
|
||||
|
||||
k = 0;
|
||||
while (1) {
|
||||
config_reg = gpioc->cfg_regs + k;
|
||||
|
||||
r_width = config_reg->reg_width;
|
||||
f_width = config_reg->field_width;
|
||||
|
||||
if (!r_width)
|
||||
break;
|
||||
for (n = 0; n < (r_width / f_width) * 1 << f_width; n++) {
|
||||
if (config_reg->enum_ids[n] == enum_id) {
|
||||
*crp = config_reg;
|
||||
*indexp = n;
|
||||
*cntp = &config_reg->cnt[n / (1 << f_width)];
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
k++;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int get_gpio_enum_id(struct pinmux_info *gpioc, unsigned gpio,
|
||||
int pos, pinmux_enum_t *enum_idp)
|
||||
{
|
||||
pinmux_enum_t enum_id = gpioc->gpios[gpio].enum_id;
|
||||
pinmux_enum_t *data = gpioc->gpio_data;
|
||||
int k;
|
||||
|
||||
if (!enum_in_range(enum_id, &gpioc->data)) {
|
||||
if (!enum_in_range(enum_id, &gpioc->mark)) {
|
||||
pr_err("non data/mark enum_id for gpio %d\n", gpio);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (pos) {
|
||||
*enum_idp = data[pos + 1];
|
||||
return pos + 1;
|
||||
}
|
||||
|
||||
for (k = 0; k < gpioc->gpio_data_size; k++) {
|
||||
if (data[k] == enum_id) {
|
||||
*enum_idp = data[k + 1];
|
||||
return k + 1;
|
||||
}
|
||||
}
|
||||
|
||||
pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int write_config_reg(struct pinmux_info *gpioc,
|
||||
struct pinmux_cfg_reg *crp,
|
||||
int index)
|
||||
{
|
||||
unsigned long ncomb, pos, value;
|
||||
|
||||
ncomb = 1 << crp->field_width;
|
||||
pos = index / ncomb;
|
||||
value = index % ncomb;
|
||||
|
||||
return read_write_reg(crp->reg, crp->reg_width,
|
||||
crp->field_width, pos, value, 1);
|
||||
}
|
||||
|
||||
static int check_config_reg(struct pinmux_info *gpioc,
|
||||
struct pinmux_cfg_reg *crp,
|
||||
int index)
|
||||
{
|
||||
unsigned long ncomb, pos, value;
|
||||
|
||||
ncomb = 1 << crp->field_width;
|
||||
pos = index / ncomb;
|
||||
value = index % ncomb;
|
||||
|
||||
if (read_write_reg(crp->reg, crp->reg_width,
|
||||
crp->field_width, pos, 0, 0) == value)
|
||||
return 0;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
|
||||
|
||||
int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
|
||||
int pinmux_type, int cfg_mode)
|
||||
{
|
||||
struct pinmux_cfg_reg *cr = NULL;
|
||||
pinmux_enum_t enum_id;
|
||||
struct pinmux_range *range;
|
||||
int in_range, pos, index;
|
||||
unsigned long *cntp;
|
||||
|
||||
switch (pinmux_type) {
|
||||
|
||||
case PINMUX_TYPE_FUNCTION:
|
||||
range = NULL;
|
||||
break;
|
||||
|
||||
case PINMUX_TYPE_OUTPUT:
|
||||
range = &gpioc->output;
|
||||
break;
|
||||
|
||||
case PINMUX_TYPE_INPUT:
|
||||
range = &gpioc->input;
|
||||
break;
|
||||
|
||||
case PINMUX_TYPE_INPUT_PULLUP:
|
||||
range = &gpioc->input_pu;
|
||||
break;
|
||||
|
||||
case PINMUX_TYPE_INPUT_PULLDOWN:
|
||||
range = &gpioc->input_pd;
|
||||
break;
|
||||
|
||||
default:
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
pos = 0;
|
||||
enum_id = 0;
|
||||
index = 0;
|
||||
while (1) {
|
||||
pos = get_gpio_enum_id(gpioc, gpio, pos, &enum_id);
|
||||
if (pos <= 0)
|
||||
goto out_err;
|
||||
|
||||
if (!enum_id)
|
||||
break;
|
||||
|
||||
in_range = enum_in_range(enum_id, &gpioc->function);
|
||||
if (!in_range && range)
|
||||
in_range = enum_in_range(enum_id, range);
|
||||
|
||||
if (!in_range)
|
||||
continue;
|
||||
|
||||
if (get_config_reg(gpioc, enum_id, &cr, &index, &cntp) != 0)
|
||||
goto out_err;
|
||||
|
||||
switch (cfg_mode) {
|
||||
case GPIO_CFG_DRYRUN:
|
||||
if (!*cntp || !check_config_reg(gpioc, cr, index))
|
||||
continue;
|
||||
break;
|
||||
|
||||
case GPIO_CFG_REQ:
|
||||
if (write_config_reg(gpioc, cr, index) != 0)
|
||||
goto out_err;
|
||||
*cntp = *cntp + 1;
|
||||
break;
|
||||
|
||||
case GPIO_CFG_FREE:
|
||||
*cntp = *cntp - 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
out_err:
|
||||
return -1;
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(gpio_lock);
|
||||
|
||||
int __gpio_request(unsigned gpio)
|
||||
{
|
||||
struct pinmux_info *gpioc = gpio_controller(gpio);
|
||||
struct pinmux_data_reg *dummy;
|
||||
unsigned long flags;
|
||||
int i, ret, pinmux_type;
|
||||
|
||||
ret = -EINVAL;
|
||||
|
||||
if (!gpioc)
|
||||
goto err_out;
|
||||
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
|
||||
if ((gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE) != PINMUX_TYPE_NONE)
|
||||
goto err_unlock;
|
||||
|
||||
/* setup pin function here if no data is associated with pin */
|
||||
|
||||
if (get_data_reg(gpioc, gpio, &dummy, &i) != 0)
|
||||
pinmux_type = PINMUX_TYPE_FUNCTION;
|
||||
else
|
||||
pinmux_type = PINMUX_TYPE_GPIO;
|
||||
|
||||
if (pinmux_type == PINMUX_TYPE_FUNCTION) {
|
||||
if (pinmux_config_gpio(gpioc, gpio,
|
||||
pinmux_type,
|
||||
GPIO_CFG_DRYRUN) != 0)
|
||||
goto err_unlock;
|
||||
|
||||
if (pinmux_config_gpio(gpioc, gpio,
|
||||
pinmux_type,
|
||||
GPIO_CFG_REQ) != 0)
|
||||
BUG();
|
||||
}
|
||||
|
||||
gpioc->gpios[gpio].flags = pinmux_type;
|
||||
|
||||
ret = 0;
|
||||
err_unlock:
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
err_out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(__gpio_request);
|
||||
|
||||
void gpio_free(unsigned gpio)
|
||||
{
|
||||
struct pinmux_info *gpioc = gpio_controller(gpio);
|
||||
unsigned long flags;
|
||||
int pinmux_type;
|
||||
|
||||
if (!gpioc)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
|
||||
pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE;
|
||||
pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE);
|
||||
gpioc->gpios[gpio].flags = PINMUX_TYPE_NONE;
|
||||
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(gpio_free);
|
||||
|
||||
static int pinmux_direction(struct pinmux_info *gpioc,
|
||||
unsigned gpio, int new_pinmux_type)
|
||||
{
|
||||
int ret, pinmux_type;
|
||||
|
||||
ret = -EINVAL;
|
||||
pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE;
|
||||
|
||||
switch (pinmux_type) {
|
||||
case PINMUX_TYPE_GPIO:
|
||||
break;
|
||||
case PINMUX_TYPE_OUTPUT:
|
||||
case PINMUX_TYPE_INPUT:
|
||||
case PINMUX_TYPE_INPUT_PULLUP:
|
||||
case PINMUX_TYPE_INPUT_PULLDOWN:
|
||||
pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE);
|
||||
break;
|
||||
default:
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
if (pinmux_config_gpio(gpioc, gpio,
|
||||
new_pinmux_type,
|
||||
GPIO_CFG_DRYRUN) != 0)
|
||||
goto err_out;
|
||||
|
||||
if (pinmux_config_gpio(gpioc, gpio,
|
||||
new_pinmux_type,
|
||||
GPIO_CFG_REQ) != 0)
|
||||
BUG();
|
||||
|
||||
gpioc->gpios[gpio].flags = new_pinmux_type;
|
||||
|
||||
ret = 0;
|
||||
err_out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int gpio_direction_input(unsigned gpio)
|
||||
{
|
||||
struct pinmux_info *gpioc = gpio_controller(gpio);
|
||||
unsigned long flags;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (!gpioc)
|
||||
goto err_out;
|
||||
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
ret = pinmux_direction(gpioc, gpio, PINMUX_TYPE_INPUT);
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
err_out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(gpio_direction_input);
|
||||
|
||||
static int __gpio_get_set_value(struct pinmux_info *gpioc,
|
||||
unsigned gpio, int value,
|
||||
int do_write)
|
||||
{
|
||||
struct pinmux_data_reg *dr = NULL;
|
||||
int bit = 0;
|
||||
|
||||
if (get_data_reg(gpioc, gpio, &dr, &bit) != 0)
|
||||
BUG();
|
||||
else
|
||||
value = read_write_reg(dr->reg, dr->reg_width,
|
||||
1, bit, value, do_write);
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
int gpio_direction_output(unsigned gpio, int value)
|
||||
{
|
||||
struct pinmux_info *gpioc = gpio_controller(gpio);
|
||||
unsigned long flags;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (!gpioc)
|
||||
goto err_out;
|
||||
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
__gpio_get_set_value(gpioc, gpio, value, 1);
|
||||
ret = pinmux_direction(gpioc, gpio, PINMUX_TYPE_OUTPUT);
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
err_out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(gpio_direction_output);
|
||||
|
||||
int gpio_get_value(unsigned gpio)
|
||||
{
|
||||
struct pinmux_info *gpioc = gpio_controller(gpio);
|
||||
unsigned long flags;
|
||||
int value = 0;
|
||||
|
||||
if (!gpioc)
|
||||
BUG();
|
||||
else {
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
value = __gpio_get_set_value(gpioc, gpio, 0, 0);
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
}
|
||||
|
||||
return value;
|
||||
}
|
||||
EXPORT_SYMBOL(gpio_get_value);
|
||||
|
||||
void gpio_set_value(unsigned gpio, int value)
|
||||
{
|
||||
struct pinmux_info *gpioc = gpio_controller(gpio);
|
||||
unsigned long flags;
|
||||
|
||||
if (!gpioc)
|
||||
BUG();
|
||||
else {
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
__gpio_get_set_value(gpioc, gpio, value, 1);
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(gpio_set_value);
|
||||
|
||||
int register_pinmux(struct pinmux_info *pip)
|
||||
{
|
||||
registered_gpio = pip;
|
||||
pr_info("pinmux: %s handling gpio %d -> %d\n",
|
||||
pip->name, pip->first_gpio, pip->last_gpio);
|
||||
|
||||
return 0;
|
||||
}
|
@@ -19,12 +19,12 @@
* Copy data from IO memory space to "real" memory space.
* This needs to be optimized.
*/
void memcpy_fromio(void *to, volatile void __iomem *from, unsigned long count)
void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
{
char *p = to;
unsigned char *p = to;
while (count) {
count--;
*p = readb((void __iomem *)from);
*p = readb(from);
p++;
from++;
}
@@ -37,10 +37,10 @@ EXPORT_SYMBOL(memcpy_fromio);
*/
void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
{
const char *p = from;
const unsigned char *p = from;
while (count) {
count--;
writeb(*p, (void __iomem *)to);
writeb(*p, to);
p++;
to++;
}
@@ -55,7 +55,7 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
{
while (count) {
count--;
writeb(c, (void __iomem *)dst);
writeb(c, dst);
dst++;
}
}
@@ -19,38 +19,33 @@
|
||||
/* SH3 has a PCMCIA bug that needs a dummy read from area 6 for a
|
||||
* workaround. */
|
||||
/* I'm not sure SH7709 has this kind of bug */
|
||||
#define dummy_read() ctrl_inb(0xba000000)
|
||||
#define dummy_read() __raw_readb(0xba000000)
|
||||
#else
|
||||
#define dummy_read()
|
||||
#endif
|
||||
|
||||
unsigned long generic_io_base;
|
||||
|
||||
static inline void delay(void)
|
||||
{
|
||||
ctrl_inw(0xa0000000);
|
||||
}
|
||||
|
||||
u8 generic_inb(unsigned long port)
|
||||
{
|
||||
return ctrl_inb((unsigned long __force)__ioport_map(port, 1));
|
||||
return __raw_readb(__ioport_map(port, 1));
|
||||
}
|
||||
|
||||
u16 generic_inw(unsigned long port)
|
||||
{
|
||||
return ctrl_inw((unsigned long __force)__ioport_map(port, 2));
|
||||
return __raw_readw(__ioport_map(port, 2));
|
||||
}
|
||||
|
||||
u32 generic_inl(unsigned long port)
|
||||
{
|
||||
return ctrl_inl((unsigned long __force)__ioport_map(port, 4));
|
||||
return __raw_readl(__ioport_map(port, 4));
|
||||
}
|
||||
|
||||
u8 generic_inb_p(unsigned long port)
|
||||
{
|
||||
unsigned long v = generic_inb(port);
|
||||
|
||||
delay();
|
||||
ctrl_delay();
|
||||
return v;
|
||||
}
|
||||
|
||||
@@ -58,7 +53,7 @@ u16 generic_inw_p(unsigned long port)
|
||||
{
|
||||
unsigned long v = generic_inw(port);
|
||||
|
||||
delay();
|
||||
ctrl_delay();
|
||||
return v;
|
||||
}
|
||||
|
||||
@@ -66,7 +61,7 @@ u32 generic_inl_p(unsigned long port)
|
||||
{
|
||||
unsigned long v = generic_inl(port);
|
||||
|
||||
delay();
|
||||
ctrl_delay();
|
||||
return v;
|
||||
}
|
||||
|
||||
@@ -81,7 +76,7 @@ void generic_insb(unsigned long port, void *dst, unsigned long count)
|
||||
volatile u8 *port_addr;
|
||||
u8 *buf = dst;
|
||||
|
||||
port_addr = (volatile u8 *)__ioport_map(port, 1);
|
||||
port_addr = (volatile u8 __force *)__ioport_map(port, 1);
|
||||
while (count--)
|
||||
*buf++ = *port_addr;
|
||||
}
|
||||
@@ -91,7 +86,7 @@ void generic_insw(unsigned long port, void *dst, unsigned long count)
|
||||
volatile u16 *port_addr;
|
||||
u16 *buf = dst;
|
||||
|
||||
port_addr = (volatile u16 *)__ioport_map(port, 2);
|
||||
port_addr = (volatile u16 __force *)__ioport_map(port, 2);
|
||||
while (count--)
|
||||
*buf++ = *port_addr;
|
||||
|
||||
@@ -103,7 +98,7 @@ void generic_insl(unsigned long port, void *dst, unsigned long count)
|
||||
volatile u32 *port_addr;
|
||||
u32 *buf = dst;
|
||||
|
||||
port_addr = (volatile u32 *)__ioport_map(port, 4);
|
||||
port_addr = (volatile u32 __force *)__ioport_map(port, 4);
|
||||
while (count--)
|
||||
*buf++ = *port_addr;
|
||||
|
||||
@@ -112,35 +107,35 @@ void generic_insl(unsigned long port, void *dst, unsigned long count)
|
||||
|
||||
void generic_outb(u8 b, unsigned long port)
|
||||
{
|
||||
ctrl_outb(b, (unsigned long __force)__ioport_map(port, 1));
|
||||
__raw_writeb(b, __ioport_map(port, 1));
|
||||
}
|
||||
|
||||
void generic_outw(u16 b, unsigned long port)
|
||||
{
|
||||
ctrl_outw(b, (unsigned long __force)__ioport_map(port, 2));
|
||||
__raw_writew(b, __ioport_map(port, 2));
|
||||
}
|
||||
|
||||
void generic_outl(u32 b, unsigned long port)
|
||||
{
|
||||
ctrl_outl(b, (unsigned long __force)__ioport_map(port, 4));
|
||||
__raw_writel(b, __ioport_map(port, 4));
|
||||
}
|
||||
|
||||
void generic_outb_p(u8 b, unsigned long port)
|
||||
{
|
||||
generic_outb(b, port);
|
||||
delay();
|
||||
ctrl_delay();
|
||||
}
|
||||
|
||||
void generic_outw_p(u16 b, unsigned long port)
|
||||
{
|
||||
generic_outw(b, port);
|
||||
delay();
|
||||
ctrl_delay();
|
||||
}
|
||||
|
||||
void generic_outl_p(u32 b, unsigned long port)
|
||||
{
|
||||
generic_outl(b, port);
|
||||
delay();
|
||||
ctrl_delay();
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -184,36 +179,6 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count)
|
||||
dummy_read();
|
||||
}
|
||||
|
||||
u8 generic_readb(void __iomem *addr)
|
||||
{
|
||||
return ctrl_inb((unsigned long __force)addr);
|
||||
}
|
||||
|
||||
u16 generic_readw(void __iomem *addr)
|
||||
{
|
||||
return ctrl_inw((unsigned long __force)addr);
|
||||
}
|
||||
|
||||
u32 generic_readl(void __iomem *addr)
|
||||
{
|
||||
return ctrl_inl((unsigned long __force)addr);
|
||||
}
|
||||
|
||||
void generic_writeb(u8 b, void __iomem *addr)
|
||||
{
|
||||
ctrl_outb(b, (unsigned long __force)addr);
|
||||
}
|
||||
|
||||
void generic_writew(u16 b, void __iomem *addr)
|
||||
{
|
||||
ctrl_outw(b, (unsigned long __force)addr);
|
||||
}
|
||||
|
||||
void generic_writel(u32 b, void __iomem *addr)
|
||||
{
|
||||
ctrl_outl(b, (unsigned long __force)addr);
|
||||
}
|
||||
|
||||
void __iomem *generic_ioport_map(unsigned long addr, unsigned int size)
|
||||
{
|
||||
return (void __iomem *)(addr + generic_io_base);
|
||||
|
584	arch/sh/kernel/kprobes.c	Normal file
@@ -0,0 +1,584 @@
|
||||
/*
|
||||
* Kernel probes (kprobes) for SuperH
|
||||
*
|
||||
* Copyright (C) 2007 Chris Smith <chris.smith@st.com>
|
||||
* Copyright (C) 2006 Lineo Solutions, Inc.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
|
||||
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
|
||||
|
||||
static struct kprobe saved_current_opcode;
|
||||
static struct kprobe saved_next_opcode;
|
||||
static struct kprobe saved_next_opcode2;
|
||||
|
||||
#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b)
|
||||
#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b)
|
||||
#define OPCODE_BRA(x) (((x) & 0xF000) == 0xa000)
|
||||
#define OPCODE_BRAF(x) (((x) & 0xF0FF) == 0x0023)
|
||||
#define OPCODE_BSR(x) (((x) & 0xF000) == 0xb000)
|
||||
#define OPCODE_BSRF(x) (((x) & 0xF0FF) == 0x0003)
|
||||
|
||||
#define OPCODE_BF_S(x) (((x) & 0xFF00) == 0x8f00)
|
||||
#define OPCODE_BT_S(x) (((x) & 0xFF00) == 0x8d00)
|
||||
|
||||
#define OPCODE_BF(x) (((x) & 0xFF00) == 0x8b00)
|
||||
#define OPCODE_BT(x) (((x) & 0xFF00) == 0x8900)
|
||||
|
||||
#define OPCODE_RTS(x) (((x) & 0x000F) == 0x000b)
|
||||
#define OPCODE_RTE(x) (((x) & 0xFFFF) == 0x002b)
|
||||
|
||||
int __kprobes arch_prepare_kprobe(struct kprobe *p)
|
||||
{
|
||||
kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);
|
||||
|
||||
if (OPCODE_RTE(opcode))
|
||||
return -EFAULT; /* Bad breakpoint */
|
||||
|
||||
p->opcode = opcode;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __kprobes arch_copy_kprobe(struct kprobe *p)
|
||||
{
|
||||
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
|
||||
p->opcode = *p->addr;
|
||||
}
|
||||
|
||||
void __kprobes arch_arm_kprobe(struct kprobe *p)
|
||||
{
|
||||
*p->addr = BREAKPOINT_INSTRUCTION;
|
||||
flush_icache_range((unsigned long)p->addr,
|
||||
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
|
||||
}
|
||||
|
||||
void __kprobes arch_disarm_kprobe(struct kprobe *p)
|
||||
{
|
||||
*p->addr = p->opcode;
|
||||
flush_icache_range((unsigned long)p->addr,
|
||||
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
|
||||
}
|
||||
|
||||
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
|
||||
{
|
||||
if (*p->addr == BREAKPOINT_INSTRUCTION)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* If an illegal slot instruction exception occurs for an address
|
||||
* containing a kprobe, remove the probe.
|
||||
*
|
||||
* Returns 0 if the exception was handled successfully, 1 otherwise.
|
||||
*/
|
||||
int __kprobes kprobe_handle_illslot(unsigned long pc)
|
||||
{
|
||||
struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);
|
||||
|
||||
if (p != NULL) {
|
||||
printk("Warning: removing kprobe from delay slot: 0x%.8x\n",
|
||||
(unsigned int)pc + 2);
|
||||
unregister_kprobe(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
void __kprobes arch_remove_kprobe(struct kprobe *p)
|
||||
{
|
||||
if (saved_next_opcode.addr != 0x0) {
|
||||
arch_disarm_kprobe(p);
|
||||
arch_disarm_kprobe(&saved_next_opcode);
|
||||
saved_next_opcode.addr = 0x0;
|
||||
saved_next_opcode.opcode = 0x0;
|
||||
|
||||
if (saved_next_opcode2.addr != 0x0) {
|
||||
arch_disarm_kprobe(&saved_next_opcode2);
|
||||
saved_next_opcode2.addr = 0x0;
|
||||
saved_next_opcode2.opcode = 0x0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
kcb->prev_kprobe.kp = kprobe_running();
|
||||
kcb->prev_kprobe.status = kcb->kprobe_status;
|
||||
}
|
||||
|
||||
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
|
||||
kcb->kprobe_status = kcb->prev_kprobe.status;
|
||||
}
|
||||
|
||||
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
|
||||
struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = p;
|
||||
}
|
||||
|
||||
/*
|
||||
* Singlestep is implemented by disabling the current kprobe and setting one
|
||||
* on the next instruction, following branches. Two probes are set if the
|
||||
* branch is conditional.
|
||||
*/
|
||||
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
|
||||
{
|
||||
kprobe_opcode_t *addr = NULL;
|
||||
saved_current_opcode.addr = (kprobe_opcode_t *) (regs->pc);
|
||||
addr = saved_current_opcode.addr;
|
||||
|
||||
if (p != NULL) {
|
||||
arch_disarm_kprobe(p);
|
||||
|
||||
if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
|
||||
unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
|
||||
saved_next_opcode.addr =
|
||||
(kprobe_opcode_t *) regs->regs[reg_nr];
|
||||
} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
|
||||
unsigned long disp = (p->opcode & 0x0FFF);
|
||||
saved_next_opcode.addr =
|
||||
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
|
||||
|
||||
} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
|
||||
unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
|
||||
saved_next_opcode.addr =
|
||||
(kprobe_opcode_t *) (regs->pc + 4 +
|
||||
regs->regs[reg_nr]);
|
||||
|
||||
} else if (OPCODE_RTS(p->opcode)) {
|
||||
saved_next_opcode.addr = (kprobe_opcode_t *) regs->pr;
|
||||
|
||||
} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
|
||||
unsigned long disp = (p->opcode & 0x00FF);
|
||||
/* case 1 */
|
||||
saved_next_opcode.addr = p->addr + 1;
|
||||
/* case 2 */
|
||||
saved_next_opcode2.addr =
|
||||
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
|
||||
saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
|
||||
arch_arm_kprobe(&saved_next_opcode2);
|
||||
|
||||
} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
|
||||
unsigned long disp = (p->opcode & 0x00FF);
|
||||
/* case 1 */
|
||||
saved_next_opcode.addr = p->addr + 2;
|
||||
/* case 2 */
|
||||
saved_next_opcode2.addr =
|
||||
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
|
||||
saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
|
||||
arch_arm_kprobe(&saved_next_opcode2);
|
||||
|
||||
} else {
|
||||
saved_next_opcode.addr = p->addr + 1;
|
||||
}
|
||||
|
||||
saved_next_opcode.opcode = *(saved_next_opcode.addr);
|
||||
arch_arm_kprobe(&saved_next_opcode);
|
||||
}
|
||||
}
|
||||
|
||||
/* Called with kretprobe_lock held */
|
||||
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
ri->ret_addr = (kprobe_opcode_t *) regs->pr;
|
||||
|
||||
/* Replace the return addr with trampoline addr */
|
||||
regs->pr = (unsigned long)kretprobe_trampoline;
|
||||
}
|
||||
|
||||
static int __kprobes kprobe_handler(struct pt_regs *regs)
|
||||
{
|
||||
struct kprobe *p;
|
||||
int ret = 0;
|
||||
kprobe_opcode_t *addr = NULL;
|
||||
struct kprobe_ctlblk *kcb;
|
||||
|
||||
/*
|
||||
* We don't want to be preempted for the entire
|
||||
* duration of kprobe processing
|
||||
*/
|
||||
preempt_disable();
|
||||
kcb = get_kprobe_ctlblk();
|
||||
|
||||
addr = (kprobe_opcode_t *) (regs->pc);
|
||||
|
||||
/* Check we're not actually recursing */
|
||||
if (kprobe_running()) {
|
||||
p = get_kprobe(addr);
|
||||
if (p) {
|
||||
if (kcb->kprobe_status == KPROBE_HIT_SS &&
|
||||
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
|
||||
goto no_kprobe;
|
||||
}
|
||||
/* We have reentered the kprobe_handler(), since
|
||||
* another probe was hit while within the handler.
|
||||
* We here save the original kprobes variables and
|
||||
* just single step on the instruction of the new probe
|
||||
* without calling any user handlers.
|
||||
*/
|
||||
save_previous_kprobe(kcb);
|
||||
set_current_kprobe(p, regs, kcb);
|
||||
kprobes_inc_nmissed_count(p);
|
||||
prepare_singlestep(p, regs);
|
||||
kcb->kprobe_status = KPROBE_REENTER;
|
||||
return 1;
|
||||
} else {
|
||||
p = __get_cpu_var(current_kprobe);
|
||||
if (p->break_handler && p->break_handler(p, regs)) {
|
||||
goto ss_probe;
|
||||
}
|
||||
}
|
||||
goto no_kprobe;
|
||||
}
|
||||
|
||||
p = get_kprobe(addr);
|
||||
if (!p) {
|
||||
/* Not one of ours: let kernel handle it */
|
||||
if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
|
||||
/*
|
||||
* The breakpoint instruction was removed right
|
||||
* after we hit it. Another cpu has removed
|
||||
* either a probepoint or a debugger breakpoint
|
||||
* at this address. In either case, no further
|
||||
* handling of this interrupt is appropriate.
|
||||
*/
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
goto no_kprobe;
|
||||
}
|
||||
|
||||
set_current_kprobe(p, regs, kcb);
|
||||
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
|
||||
if (p->pre_handler && p->pre_handler(p, regs))
|
||||
/* handler has already set things up, so skip ss setup */
|
||||
return 1;
|
||||
|
||||
ss_probe:
|
||||
prepare_singlestep(p, regs);
|
||||
kcb->kprobe_status = KPROBE_HIT_SS;
|
||||
return 1;
|
||||
|
||||
no_kprobe:
|
||||
preempt_enable_no_resched();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* For function-return probes, init_kprobes() establishes a probepoint
|
||||
* here. When a retprobed function returns, this probe is hit and
|
||||
* trampoline_probe_handler() runs, calling the kretprobe's handler.
|
||||
*/
|
||||
static void __used kretprobe_trampoline_holder(void)
|
||||
{
|
||||
asm volatile (".globl kretprobe_trampoline\n"
|
||||
"kretprobe_trampoline:\n\t"
|
||||
"nop\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Called when we hit the probe point at kretprobe_trampoline
|
||||
*/
|
||||
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||
{
|
||||
struct kretprobe_instance *ri = NULL;
|
||||
struct hlist_head *head, empty_rp;
|
||||
struct hlist_node *node, *tmp;
|
||||
unsigned long flags, orig_ret_address = 0;
|
||||
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
|
||||
|
||||
INIT_HLIST_HEAD(&empty_rp);
|
||||
kretprobe_hash_lock(current, &head, &flags);
|
||||
|
||||
/*
|
||||
* It is possible to have multiple instances associated with a given
|
||||
* task either because multiple functions in the call path
|
||||
* have a return probe installed on them, and/or more than one
|
||||
* return probe was registered for a target function.
|
||||
*
|
||||
* We can handle this because:
|
||||
* - instances are always inserted at the head of the list
|
||||
* - when multiple return probes are registered for the same
|
||||
* function, the first instance's ret_addr will point to the
|
||||
* real return address, and all the rest will point to
|
||||
* kretprobe_trampoline
|
||||
*/
|
||||
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
|
||||
if (ri->task != current)
|
||||
/* another task is sharing our hash bucket */
|
||||
continue;
|
||||
|
||||
if (ri->rp && ri->rp->handler) {
|
||||
__get_cpu_var(current_kprobe) = &ri->rp->kp;
|
||||
ri->rp->handler(ri, regs);
|
||||
__get_cpu_var(current_kprobe) = NULL;
|
||||
}
|
||||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
recycle_rp_inst(ri, &empty_rp);
|
||||
|
||||
if (orig_ret_address != trampoline_address)
|
||||
/*
|
||||
* This is the real return address. Any other
|
||||
* instances associated with this task are for
|
||||
* other calls deeper on the call stack
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
kretprobe_assert(ri, orig_ret_address, trampoline_address);
|
||||
|
||||
regs->pc = orig_ret_address;
|
||||
kretprobe_hash_unlock(current, &flags);
|
||||
|
||||
preempt_enable_no_resched();
|
||||
|
||||
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
|
||||
hlist_del(&ri->hlist);
|
||||
kfree(ri);
|
||||
}
|
||||
|
||||
return orig_ret_address;
|
||||
}
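
/*
 * Illustrative sketch, not part of this file: a client of the kretprobe path
 * handled by trampoline_probe_handler() above, using only the generic
 * kretprobe API. The probed symbol "do_fork" and the maxactive value are
 * example choices.
 */
#include <linux/kprobes.h>
#include <linux/module.h>

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	/* r0 carries the probed function's return value on SH */
	printk(KERN_INFO "probed function returned %ld\n",
	       (long)regs->regs[0]);
	return 0;
}

static struct kretprobe example_krp = {
	.kp.symbol_name	= "do_fork",
	.handler	= example_ret_handler,
	.maxactive	= 16,	/* instances tracked concurrently */
};

static int __init example_krp_init(void)
{
	return register_kretprobe(&example_krp);
}

static void __exit example_krp_exit(void)
{
	unregister_kretprobe(&example_krp);
}

module_init(example_krp_init);
module_exit(example_krp_exit);
MODULE_LICENSE("GPL");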
|
||||
|
||||
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
|
||||
{
|
||||
struct kprobe *cur = kprobe_running();
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
kprobe_opcode_t *addr = NULL;
|
||||
struct kprobe *p = NULL;
|
||||
|
||||
if (!cur)
|
||||
return 0;
|
||||
|
||||
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
|
||||
kcb->kprobe_status = KPROBE_HIT_SSDONE;
|
||||
cur->post_handler(cur, regs, 0);
|
||||
}
|
||||
|
||||
if (saved_next_opcode.addr != 0x0) {
|
||||
arch_disarm_kprobe(&saved_next_opcode);
|
||||
saved_next_opcode.addr = 0x0;
|
||||
saved_next_opcode.opcode = 0x0;
|
||||
|
||||
addr = saved_current_opcode.addr;
|
||||
saved_current_opcode.addr = 0x0;
|
||||
|
||||
p = get_kprobe(addr);
|
||||
arch_arm_kprobe(p);
|
||||
|
||||
if (saved_next_opcode2.addr != 0x0) {
|
||||
arch_disarm_kprobe(&saved_next_opcode2);
|
||||
saved_next_opcode2.addr = 0x0;
|
||||
saved_next_opcode2.opcode = 0x0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Restore back the original saved kprobes variables and continue. */
|
||||
if (kcb->kprobe_status == KPROBE_REENTER) {
|
||||
restore_previous_kprobe(kcb);
|
||||
goto out;
|
||||
}
|
||||
|
||||
reset_current_kprobe();
|
||||
|
||||
out:
|
||||
preempt_enable_no_resched();
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
|
||||
{
|
||||
struct kprobe *cur = kprobe_running();
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
const struct exception_table_entry *entry;
|
||||
|
||||
switch (kcb->kprobe_status) {
|
||||
case KPROBE_HIT_SS:
|
||||
case KPROBE_REENTER:
|
||||
/*
|
||||
* We are here because the instruction being single
|
||||
* stepped caused a page fault. We reset the current
|
||||
* kprobe, point the pc back to the probe address
|
||||
* and allow the page fault handler to continue as a
|
||||
* normal page fault.
|
||||
*/
|
||||
regs->pc = (unsigned long)cur->addr;
|
||||
if (kcb->kprobe_status == KPROBE_REENTER)
|
||||
restore_previous_kprobe(kcb);
|
||||
else
|
||||
reset_current_kprobe();
|
||||
preempt_enable_no_resched();
|
||||
break;
|
||||
case KPROBE_HIT_ACTIVE:
|
||||
case KPROBE_HIT_SSDONE:
|
||||
/*
|
||||
* We increment the nmissed count for accounting,
|
||||
* we can also use npre/npostfault count for accounting
|
||||
* these specific fault cases.
|
||||
*/
|
||||
kprobes_inc_nmissed_count(cur);
|
||||
|
||||
/*
|
||||
* We come here because instructions in the pre/post
|
||||
* handler caused the page_fault, this could happen
|
||||
* if handler tries to access user space by
|
||||
* copy_from_user(), get_user() etc. Let the
|
||||
* user-specified handler try to fix it first.
|
||||
*/
|
||||
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
|
||||
return 1;
|
||||
|
||||
/*
|
||||
* In case the user-specified fault handler returned
|
||||
* zero, try to fix up.
|
||||
*/
|
||||
if ((entry = search_exception_tables(regs->pc)) != NULL) {
|
||||
regs->pc = entry->fixup;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* fixup_exception() could not handle it,
|
||||
* Let do_page_fault() fix it.
|
||||
*/
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Wrapper routine for handling exceptions.
|
||||
*/
|
||||
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
|
||||
unsigned long val, void *data)
|
||||
{
|
||||
struct kprobe *p = NULL;
|
||||
struct die_args *args = (struct die_args *)data;
|
||||
int ret = NOTIFY_DONE;
|
||||
kprobe_opcode_t *addr = NULL;
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
|
||||
addr = (kprobe_opcode_t *) (args->regs->pc);
|
||||
if (val == DIE_TRAP) {
|
||||
if (!kprobe_running()) {
|
||||
if (kprobe_handler(args->regs)) {
|
||||
ret = NOTIFY_STOP;
|
||||
} else {
|
||||
/* Not a kprobe trap */
|
||||
ret = NOTIFY_DONE;
|
||||
}
|
||||
} else {
|
||||
p = get_kprobe(addr);
|
||||
if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
|
||||
(kcb->kprobe_status == KPROBE_REENTER)) {
|
||||
if (post_kprobe_handler(args->regs))
|
||||
ret = NOTIFY_STOP;
|
||||
} else {
|
||||
if (kprobe_handler(args->regs)) {
|
||||
ret = NOTIFY_STOP;
|
||||
} else {
|
||||
p = __get_cpu_var(current_kprobe);
|
||||
if (p->break_handler &&
|
||||
p->break_handler(p, args->regs))
|
||||
ret = NOTIFY_STOP;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
|
||||
{
|
||||
struct jprobe *jp = container_of(p, struct jprobe, kp);
|
||||
unsigned long addr;
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
|
||||
kcb->jprobe_saved_regs = *regs;
|
||||
kcb->jprobe_saved_r15 = regs->regs[15];
|
||||
addr = kcb->jprobe_saved_r15;
|
||||
|
||||
/*
|
||||
* TBD: As Linus pointed out, gcc assumes that the callee
|
||||
* owns the argument space and could overwrite it, e.g.
|
||||
* tailcall optimization. So, to be absolutely safe
|
||||
* we also save and restore enough stack bytes to cover
|
||||
* the argument area.
|
||||
*/
|
||||
memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
|
||||
MIN_STACK_SIZE(addr));
|
||||
|
||||
regs->pc = (unsigned long)(jp->entry);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
void __kprobes jprobe_return(void)
|
||||
{
|
||||
asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
|
||||
}
|
||||
|
||||
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
|
||||
{
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
unsigned long stack_addr = kcb->jprobe_saved_r15;
|
||||
u8 *addr = (u8 *)regs->pc;
|
||||
|
||||
if ((addr >= (u8 *)jprobe_return) &&
|
||||
(addr <= (u8 *)jprobe_return_end)) {
|
||||
*regs = kcb->jprobe_saved_regs;
|
||||
|
||||
memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
|
||||
MIN_STACK_SIZE(stack_addr));
|
||||
|
||||
kcb->kprobe_status = KPROBE_HIT_SS;
|
||||
preempt_enable_no_resched();
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct kprobe trampoline_p = {
|
||||
.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
|
||||
.pre_handler = trampoline_probe_handler
|
||||
};
|
||||
|
||||
int __init arch_init_kprobes(void)
|
||||
{
|
||||
saved_next_opcode.addr = 0x0;
|
||||
saved_next_opcode.opcode = 0x0;
|
||||
|
||||
saved_current_opcode.addr = 0x0;
|
||||
saved_current_opcode.opcode = 0x0;
|
||||
|
||||
saved_next_opcode2.addr = 0x0;
|
||||
saved_next_opcode2.opcode = 0x0;
|
||||
|
||||
return register_kprobe(&trampoline_p);
|
||||
}
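
/*
 * Illustrative sketch, not part of this commit: a minimal client of the arch
 * support above through the generic kprobes API. The probed symbol
 * "sys_open" is only an example.
 */
#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at pc=%08lx\n", regs->pc);
	return 0;	/* let prepare_singlestep() and the post handler run */
}

static struct kprobe example_kp = {
	.symbol_name	= "sys_open",
	.pre_handler	= example_pre,
};

static int __init example_kp_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_kp_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_kp_init);
module_exit(example_kp_exit);
MODULE_LICENSE("GPL");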
|
@@ -14,6 +14,7 @@
|
||||
#include <linux/string.h>
|
||||
#include <asm/machvec.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/irq.h>
|
||||
|
||||
@@ -125,9 +126,6 @@ void __init sh_mv_setup(void)
|
||||
mv_set(insb); mv_set(insw); mv_set(insl);
|
||||
mv_set(outsb); mv_set(outsw); mv_set(outsl);
|
||||
|
||||
mv_set(readb); mv_set(readw); mv_set(readl);
|
||||
mv_set(writeb); mv_set(writew); mv_set(writel);
|
||||
|
||||
mv_set(ioport_map);
|
||||
mv_set(ioport_unmap);
|
||||
mv_set(irq_demux);
|
||||
|
@@ -7,7 +7,11 @@
|
||||
*
|
||||
* SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
|
||||
* Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
|
||||
* Copyright (C) 2002 - 2007 Paul Mundt
|
||||
* Copyright (C) 2002 - 2008 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
@@ -26,6 +30,7 @@
|
||||
#include <asm/system.h>
|
||||
#include <asm/ubc.h>
|
||||
#include <asm/fpu.h>
|
||||
#include <asm/syscalls.h>
|
||||
|
||||
static int hlt_counter;
|
||||
int ubc_usercnt = 0;
|
||||
@@ -111,15 +116,21 @@ void show_regs(struct pt_regs * regs)
|
||||
{
|
||||
printk("\n");
|
||||
printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm);
|
||||
printk("CPU : %d %s (%s %.*s)\n",
|
||||
smp_processor_id(), print_tainted(), init_utsname()->release,
|
||||
(int)strcspn(init_utsname()->version, " "),
|
||||
init_utsname()->version);
|
||||
|
||||
print_symbol("PC is at %s\n", instruction_pointer(regs));
|
||||
print_symbol("PR is at %s\n", regs->pr);
|
||||
|
||||
printk("PC : %08lx SP : %08lx SR : %08lx ",
|
||||
regs->pc, regs->regs[15], regs->sr);
|
||||
#ifdef CONFIG_MMU
|
||||
printk("TEA : %08x ", ctrl_inl(MMU_TEA));
|
||||
printk("TEA : %08x\n", ctrl_inl(MMU_TEA));
|
||||
#else
|
||||
printk(" ");
|
||||
printk("\n");
|
||||
#endif
|
||||
printk("%s\n", print_tainted());
|
||||
|
||||
printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
|
||||
regs->regs[0],regs->regs[1],
|
||||
@@ -162,6 +173,7 @@ __asm__(".align 5\n"
|
||||
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
|
||||
{
|
||||
struct pt_regs regs;
|
||||
int pid;
|
||||
|
||||
memset(&regs, 0, sizeof(regs));
|
||||
regs.regs[4] = (unsigned long)arg;
|
||||
@@ -171,8 +183,12 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
|
||||
regs.sr = (1 << 30);
|
||||
|
||||
/* Ok, create the new process.. */
|
||||
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
|
||||
&regs, 0, NULL, NULL);
|
||||
pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
|
||||
&regs, 0, NULL, NULL);
|
||||
|
||||
trace_mark(kernel_arch_kthread_create, "pid %d fn %p", pid, fn);
|
||||
|
||||
return pid;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -210,10 +226,10 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
|
||||
struct task_struct *tsk = current;
|
||||
|
||||
fpvalid = !!tsk_used_math(tsk);
|
||||
if (fpvalid) {
|
||||
unlazy_fpu(tsk, regs);
|
||||
memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
|
||||
}
|
||||
if (fpvalid)
|
||||
fpvalid = !fpregs_get(tsk, NULL, 0,
|
||||
sizeof(struct user_fpu_struct),
|
||||
fpu, NULL);
|
||||
#endif
|
||||
|
||||
return fpvalid;
|
||||
|
@@ -25,6 +25,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/syscalls.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/mmu_context.h>
|
||||
@@ -395,6 +396,7 @@ ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
|
||||
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
|
||||
{
|
||||
struct pt_regs regs;
|
||||
int pid;
|
||||
|
||||
memset(&regs, 0, sizeof(regs));
|
||||
regs.regs[2] = (unsigned long)arg;
|
||||
@@ -403,8 +405,13 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
|
||||
regs.pc = (unsigned long)kernel_thread_helper;
|
||||
regs.sr = (1 << 30);
|
||||
|
||||
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
|
||||
&regs, 0, NULL, NULL);
|
||||
/* Ok, create the new process.. */
|
||||
pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
|
||||
&regs, 0, NULL, NULL);
|
||||
|
||||
trace_mark(kernel_arch_kthread_create, "pid %d fn %p", pid, fn);
|
||||
|
||||
return pid;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -1,12 +1,14 @@
|
||||
/*
|
||||
* linux/arch/sh/kernel/ptrace.c
|
||||
* SuperH process tracing
|
||||
*
|
||||
* Original x86 implementation:
|
||||
* By Ross Biro 1/23/92
|
||||
* edited by Linus Torvalds
|
||||
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
|
||||
* Copyright (C) 2002 - 2008 Paul Mundt
|
||||
*
|
||||
* SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
|
||||
* Audit support: Yuichi Nakamura <ynakam@hitachisoft.jp>
|
||||
* Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
@@ -22,16 +24,15 @@
|
||||
#include <linux/audit.h>
|
||||
#include <linux/seccomp.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/regset.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
||||
/*
|
||||
* does not yet catch signals sent when the child dies.
|
||||
* in exit.c or in signal.c.
|
||||
*/
|
||||
#include <asm/syscalls.h>
|
||||
#include <asm/fpu.h>
|
||||
|
||||
/*
|
||||
* This routine will get a word off of the process kernel stack.
|
||||
@@ -61,16 +62,12 @@ static inline int put_stack_long(struct task_struct *task, int offset,
|
||||
|
||||
void user_enable_single_step(struct task_struct *child)
|
||||
{
|
||||
struct pt_regs *regs = task_pt_regs(child);
|
||||
long pc;
|
||||
|
||||
pc = get_stack_long(child, (long)®s->pc);
|
||||
|
||||
/* Next scheduling will set up UBC */
|
||||
if (child->thread.ubc_pc == 0)
|
||||
ubc_usercnt += 1;
|
||||
|
||||
child->thread.ubc_pc = pc;
|
||||
child->thread.ubc_pc = get_stack_long(child,
|
||||
offsetof(struct pt_regs, pc));
|
||||
|
||||
set_tsk_thread_flag(child, TIF_SINGLESTEP);
|
||||
}
|
||||
@@ -102,9 +99,213 @@ void ptrace_disable(struct task_struct *child)
|
||||
user_disable_single_step(child);
|
||||
}
|
||||
|
||||
static int genregs_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
const struct pt_regs *regs = task_pt_regs(target);
|
||||
int ret;
|
||||
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
regs->regs,
|
||||
0, 16 * sizeof(unsigned long));
|
||||
if (!ret)
|
||||
/* PC, PR, SR, GBR, MACH, MACL, TRA */
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
&regs->pc,
|
||||
offsetof(struct pt_regs, pc),
|
||||
sizeof(struct pt_regs));
|
||||
if (!ret)
|
||||
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
|
||||
sizeof(struct pt_regs), -1);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int genregs_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
struct pt_regs *regs = task_pt_regs(target);
|
||||
int ret;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
regs->regs,
|
||||
0, 16 * sizeof(unsigned long));
|
||||
if (!ret && count > 0)
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&regs->pc,
|
||||
offsetof(struct pt_regs, pc),
|
||||
sizeof(struct pt_regs));
|
||||
if (!ret)
|
||||
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
sizeof(struct pt_regs), -1);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SH_FPU
|
||||
int fpregs_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = init_fpu(target);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if ((boot_cpu_data.flags & CPU_HAS_FPU))
|
||||
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.fpu.hard, 0, -1);
|
||||
|
||||
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.fpu.soft, 0, -1);
|
||||
}
|
||||
|
||||
static int fpregs_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = init_fpu(target);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
set_stopped_child_used_math(target);
|
||||
|
||||
if ((boot_cpu_data.flags & CPU_HAS_FPU))
|
||||
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.fpu.hard, 0, -1);
|
||||
|
||||
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.fpu.soft, 0, -1);
|
||||
}
|
||||
|
||||
static int fpregs_active(struct task_struct *target,
|
||||
const struct user_regset *regset)
|
||||
{
|
||||
return tsk_used_math(target) ? regset->n : 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SH_DSP
|
||||
static int dspregs_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
const struct pt_dspregs *regs = task_pt_dspregs(target);
|
||||
int ret;
|
||||
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
|
||||
0, sizeof(struct pt_dspregs));
|
||||
if (!ret)
|
||||
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
|
||||
sizeof(struct pt_dspregs), -1);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dspregs_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
struct pt_dspregs *regs = task_pt_dspregs(target);
|
||||
int ret;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
|
||||
0, sizeof(struct pt_dspregs));
|
||||
if (!ret)
|
||||
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
sizeof(struct pt_dspregs), -1);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dspregs_active(struct task_struct *target,
|
||||
const struct user_regset *regset)
|
||||
{
|
||||
struct pt_regs *regs = task_pt_regs(target);
|
||||
|
||||
return regs->sr & SR_DSP ? regset->n : 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* These are our native regset flavours.
|
||||
*/
|
||||
enum sh_regset {
|
||||
REGSET_GENERAL,
|
||||
#ifdef CONFIG_SH_FPU
|
||||
REGSET_FPU,
|
||||
#endif
|
||||
#ifdef CONFIG_SH_DSP
|
||||
REGSET_DSP,
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct user_regset sh_regsets[] = {
|
||||
/*
|
||||
* Format is:
|
||||
* R0 --> R15
|
||||
* PC, PR, SR, GBR, MACH, MACL, TRA
|
||||
*/
|
||||
[REGSET_GENERAL] = {
|
||||
.core_note_type = NT_PRSTATUS,
|
||||
.n = ELF_NGREG,
|
||||
.size = sizeof(long),
|
||||
.align = sizeof(long),
|
||||
.get = genregs_get,
|
||||
.set = genregs_set,
|
||||
},
|
||||
|
||||
#ifdef CONFIG_SH_FPU
|
||||
[REGSET_FPU] = {
|
||||
.core_note_type = NT_PRFPREG,
|
||||
.n = sizeof(struct user_fpu_struct) / sizeof(long),
|
||||
.size = sizeof(long),
|
||||
.align = sizeof(long),
|
||||
.get = fpregs_get,
|
||||
.set = fpregs_set,
|
||||
.active = fpregs_active,
|
||||
},
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SH_DSP
|
||||
[REGSET_DSP] = {
|
||||
.n = sizeof(struct pt_dspregs) / sizeof(long),
|
||||
.size = sizeof(long),
|
||||
.align = sizeof(long),
|
||||
.get = dspregs_get,
|
||||
.set = dspregs_set,
|
||||
.active = dspregs_active,
|
||||
},
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct user_regset_view user_sh_native_view = {
|
||||
.name = "sh",
|
||||
.e_machine = EM_SH,
|
||||
.regsets = sh_regsets,
|
||||
.n = ARRAY_SIZE(sh_regsets),
|
||||
};
|
||||
|
||||
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
|
||||
{
|
||||
return &user_sh_native_view;
|
||||
}
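
/*
 * Illustrative sketch, not part of this commit: a userspace tracer hitting
 * the regset-backed PTRACE_GETREGS path of this file. The child pid handling
 * and the use of struct pt_regs from <asm/ptrace.h> are assumptions of the
 * example, not something the patch mandates.
 */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>

static void dump_child_regs(pid_t pid)
{
	struct pt_regs regs;

	if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) == 0)
		printf("pc=%08lx sp=%08lx r0=%08lx\n",
		       regs.pc, regs.regs[15], regs.regs[0]);
	else
		perror("PTRACE_GETREGS");
}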
|
||||
|
||||
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
{
|
||||
struct user * dummy = NULL;
|
||||
unsigned long __user *datap = (unsigned long __user *)data;
|
||||
int ret;
|
||||
|
||||
switch (request) {
|
||||
@@ -133,7 +334,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
tmp = !!tsk_used_math(child);
|
||||
else
|
||||
tmp = 0;
|
||||
ret = put_user(tmp, (unsigned long __user *)data);
|
||||
ret = put_user(tmp, datap);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -157,34 +358,39 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
}
|
||||
break;
|
||||
|
||||
case PTRACE_GETREGS:
|
||||
return copy_regset_to_user(child, &user_sh_native_view,
|
||||
REGSET_GENERAL,
|
||||
0, sizeof(struct pt_regs),
|
||||
(void __user *)data);
|
||||
case PTRACE_SETREGS:
|
||||
return copy_regset_from_user(child, &user_sh_native_view,
|
||||
REGSET_GENERAL,
|
||||
0, sizeof(struct pt_regs),
|
||||
(const void __user *)data);
|
||||
#ifdef CONFIG_SH_FPU
|
||||
case PTRACE_GETFPREGS:
|
||||
return copy_regset_to_user(child, &user_sh_native_view,
|
||||
REGSET_FPU,
|
||||
0, sizeof(struct user_fpu_struct),
|
||||
(void __user *)data);
|
||||
case PTRACE_SETFPREGS:
|
||||
return copy_regset_from_user(child, &user_sh_native_view,
|
||||
REGSET_FPU,
|
||||
0, sizeof(struct user_fpu_struct),
|
||||
(const void __user *)data);
|
||||
#endif
|
||||
#ifdef CONFIG_SH_DSP
|
||||
case PTRACE_GETDSPREGS: {
|
||||
unsigned long dp;
|
||||
|
||||
ret = -EIO;
|
||||
dp = ((unsigned long) child) + THREAD_SIZE -
|
||||
sizeof(struct pt_dspregs);
|
||||
if (*((int *) (dp - 4)) == SR_FD) {
|
||||
copy_to_user((void *)addr, (void *) dp,
|
||||
sizeof(struct pt_dspregs));
|
||||
ret = 0;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_SETDSPREGS: {
|
||||
unsigned long dp;
|
||||
|
||||
ret = -EIO;
|
||||
dp = ((unsigned long) child) + THREAD_SIZE -
|
||||
sizeof(struct pt_dspregs);
|
||||
if (*((int *) (dp - 4)) == SR_FD) {
|
||||
copy_from_user((void *) dp, (void *)addr,
|
||||
sizeof(struct pt_dspregs));
|
||||
ret = 0;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PTRACE_GETDSPREGS:
|
||||
return copy_regset_to_user(child, &user_sh_native_view,
|
||||
REGSET_DSP,
|
||||
0, sizeof(struct pt_dspregs),
|
||||
(void __user *)data);
|
||||
case PTRACE_SETDSPREGS:
|
||||
return copy_regset_from_user(child, &user_sh_native_view,
|
||||
REGSET_DSP,
|
||||
0, sizeof(struct pt_dspregs),
|
||||
(const void __user *)data);
|
||||
#endif
|
||||
#ifdef CONFIG_BINFMT_ELF_FDPIC
|
||||
case PTRACE_GETFDPIC: {
|
||||
@@ -202,7 +408,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
if (put_user(tmp, (unsigned long *) data)) {
|
||||
if (put_user(tmp, datap)) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
@@ -35,6 +35,7 @@
|
||||
#include <asm/system.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/syscalls.h>
|
||||
#include <asm/fpu.h>
|
||||
|
||||
/* This mask defines the bits of the SR which the user is not allowed to
|
||||
|
@@ -26,6 +26,9 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/crash_dump.h>
|
||||
#include <linux/mmzone.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/page.h>
|
||||
@@ -144,6 +147,7 @@ static void __init reserve_crashkernel(void)
|
||||
{
|
||||
unsigned long long free_mem;
|
||||
unsigned long long crash_size, crash_base;
|
||||
void *vp;
|
||||
int ret;
|
||||
|
||||
free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
|
||||
@@ -152,12 +156,14 @@ static void __init reserve_crashkernel(void)
|
||||
&crash_size, &crash_base);
|
||||
if (ret == 0 && crash_size) {
|
||||
if (crash_base <= 0) {
|
||||
printk(KERN_INFO "crashkernel reservation failed - "
|
||||
"you have to specify a base address\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (reserve_bootmem(crash_base, crash_size,
|
||||
vp = alloc_bootmem_nopanic(crash_size);
|
||||
if (!vp) {
|
||||
printk(KERN_INFO "crashkernel allocation "
|
||||
"failed\n");
|
||||
return;
|
||||
}
|
||||
crash_base = __pa(vp);
|
||||
} else if (reserve_bootmem(crash_base, crash_size,
|
||||
BOOTMEM_EXCLUSIVE) < 0) {
|
||||
printk(KERN_INFO "crashkernel reservation failed - "
|
||||
"memory is in use\n");
|
||||
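
/*
 * Illustrative usage, not part of the diff: with the fallback above, both
 * boot parameter forms work. "crashkernel=16M@32M" keeps the fixed-base
 * reservation, while plain "crashkernel=16M" leaves crash_base at 0 and lets
 * alloc_bootmem_nopanic() pick the base. The sizes are example values.
 */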
@@ -179,6 +185,24 @@ static inline void __init reserve_crashkernel(void)
|
||||
{}
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
|
||||
void __cpuinit calibrate_delay(void)
|
||||
{
|
||||
struct clk *clk = clk_get(NULL, "cpu_clk");
|
||||
|
||||
if (IS_ERR(clk))
|
||||
panic("Need a sane CPU clock definition!");
|
||||
|
||||
loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ;
|
||||
|
||||
printk(KERN_INFO "Calibrating delay loop (skipped)... "
|
||||
"%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n",
|
||||
loops_per_jiffy/(500000/HZ),
|
||||
(loops_per_jiffy/(5000/HZ)) % 100,
|
||||
loops_per_jiffy);
|
||||
}
|
||||
#endif
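
/*
 * Worked example (illustrative, assumed numbers): with a 200 MHz "cpu_clk"
 * and HZ=100, loops_per_jiffy = (200000000 >> 1) / 100 = 1000000, which the
 * printk above reports as "200.00 BogoMIPS PRESET (lpj=1000000)".
 */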
|
||||
|
||||
void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
|
||||
unsigned long end_pfn)
|
||||
{
|
||||
@@ -232,15 +256,17 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
|
||||
* case of us accidentally initializing the bootmem allocator with
|
||||
* an invalid RAM area.
|
||||
*/
|
||||
reserve_bootmem(__MEMORY_START+PAGE_SIZE,
|
||||
(PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START,
|
||||
BOOTMEM_DEFAULT);
|
||||
reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
|
||||
(PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) -
|
||||
(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET),
|
||||
BOOTMEM_DEFAULT);
|
||||
|
||||
/*
|
||||
* reserve physical page 0 - it's a special BIOS page on many boxes,
|
||||
* enabling clean reboots, SMP operation, laptop functions.
|
||||
*/
|
||||
reserve_bootmem(__MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT);
|
||||
reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET,
|
||||
BOOTMEM_DEFAULT);
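
/*
 * Worked example (illustrative, assumed values): with __MEMORY_START at
 * 0x0c000000 and CONFIG_ZERO_PAGE_OFFSET = 0x1000, the two calls above keep
 * [0x0c000000, 0x0c001000) (the zero-page/boot-parameter area) reserved and
 * start the general reservation at 0x0c001000 rather than a hard-coded
 * PAGE_SIZE.
 */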
|
||||
|
||||
sparse_memory_present_with_active_regions(0);
|
||||
|
||||
@@ -248,17 +274,18 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
|
||||
ROOT_DEV = Root_RAM0;
|
||||
|
||||
if (LOADER_TYPE && INITRD_START) {
|
||||
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
|
||||
reserve_bootmem(INITRD_START + __MEMORY_START,
|
||||
INITRD_SIZE, BOOTMEM_DEFAULT);
|
||||
initrd_start = INITRD_START + PAGE_OFFSET +
|
||||
__MEMORY_START;
|
||||
unsigned long initrd_start_phys = INITRD_START + __MEMORY_START;
|
||||
|
||||
if (initrd_start_phys + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
|
||||
reserve_bootmem(initrd_start_phys, INITRD_SIZE,
|
||||
BOOTMEM_DEFAULT);
|
||||
initrd_start = (unsigned long)__va(initrd_start_phys);
|
||||
initrd_end = initrd_start + INITRD_SIZE;
|
||||
} else {
|
||||
printk("initrd extends beyond end of memory "
|
||||
"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
|
||||
INITRD_START + INITRD_SIZE,
|
||||
max_low_pfn << PAGE_SHIFT);
|
||||
"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
|
||||
initrd_start_phys + INITRD_SIZE,
|
||||
(unsigned long)PFN_PHYS(max_low_pfn));
|
||||
initrd_start = 0;
|
||||
}
|
||||
}
|
||||
@@ -530,6 +557,8 @@ struct dentry *sh_debugfs_root;
|
||||
static int __init sh_debugfs_init(void)
|
||||
{
|
||||
sh_debugfs_root = debugfs_create_dir("sh", NULL);
|
||||
if (!sh_debugfs_root)
|
||||
return -ENOMEM;
|
||||
if (IS_ERR(sh_debugfs_root))
|
||||
return PTR_ERR(sh_debugfs_root);
|
||||
|
||||
|
@@ -16,6 +16,7 @@
|
||||
#include <asm/delay.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/ftrace.h>
|
||||
|
||||
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
|
||||
extern struct hw_interrupt_type no_irq_type;
|
||||
@@ -133,6 +134,9 @@ EXPORT_SYMBOL(__flush_purge_region);
|
||||
EXPORT_SYMBOL(clear_user_page);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_FTRACE
|
||||
EXPORT_SYMBOL(mcount);
|
||||
#endif
|
||||
EXPORT_SYMBOL(csum_partial);
|
||||
EXPORT_SYMBOL(csum_partial_copy_generic);
|
||||
#ifdef CONFIG_IPV6
|
||||
|
@@ -30,6 +30,7 @@
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/syscalls.h>
|
||||
#include <asm/fpu.h>
|
||||
|
||||
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
|
||||
@@ -215,6 +216,9 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
|
||||
sigset_t set;
|
||||
int r0;
|
||||
|
||||
/* Always make any pending restarted system calls return -EINTR */
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
||||
goto badframe;
|
||||
|
||||
@@ -247,9 +251,11 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
|
||||
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
|
||||
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
|
||||
sigset_t set;
|
||||
stack_t st;
|
||||
int r0;
|
||||
|
||||
/* Always make any pending restarted system calls return -EINTR */
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
||||
goto badframe;
|
||||
|
||||
@@ -265,11 +271,9 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
|
||||
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
|
||||
goto badframe;
|
||||
|
||||
if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
|
||||
if (do_sigaltstack(&frame->uc.uc_stack, NULL,
|
||||
regs->regs[15]) == -EFAULT)
|
||||
goto badframe;
|
||||
/* It is more difficult to avoid calling this function than to
|
||||
call it and ignore errors. */
|
||||
do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);
|
||||
|
||||
return r0;
|
||||
|
||||
@@ -429,7 +433,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
||||
|
||||
/* Create the ucontext. */
|
||||
err |= __put_user(0, &frame->uc.uc_flags);
|
||||
err |= __put_user(0, &frame->uc.uc_link);
|
||||
err |= __put_user(NULL, &frame->uc.uc_link);
|
||||
err |= __put_user((void *)current->sas_ss_sp,
|
||||
&frame->uc.uc_stack.ss_sp);
|
||||
err |= __put_user(sas_ss_flags(regs->regs[15]),
|
||||
@@ -492,37 +496,43 @@ give_sigsegv:
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
static inline void
|
||||
handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
|
||||
struct sigaction *sa)
|
||||
{
|
||||
/* If we're not from a syscall, bail out */
|
||||
if (regs->tra < 0)
|
||||
return;
|
||||
|
||||
/* check for system call restart.. */
|
||||
switch (regs->regs[0]) {
|
||||
case -ERESTART_RESTARTBLOCK:
|
||||
case -ERESTARTNOHAND:
|
||||
no_system_call_restart:
|
||||
regs->regs[0] = -EINTR;
|
||||
regs->sr |= 1;
|
||||
break;
|
||||
|
||||
case -ERESTARTSYS:
|
||||
if (!(sa->sa_flags & SA_RESTART))
|
||||
goto no_system_call_restart;
|
||||
/* fallthrough */
|
||||
case -ERESTARTNOINTR:
|
||||
regs->regs[0] = save_r0;
|
||||
regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* OK, we're invoking a handler
|
||||
*/
|
||||
|
||||
static int
|
||||
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
|
||||
sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* Are we from a system call? */
|
||||
if (regs->tra >= 0) {
|
||||
/* If so, check system call restarting.. */
|
||||
switch (regs->regs[0]) {
|
||||
case -ERESTART_RESTARTBLOCK:
|
||||
case -ERESTARTNOHAND:
|
||||
no_system_call_restart:
|
||||
regs->regs[0] = -EINTR;
|
||||
break;
|
||||
|
||||
case -ERESTARTSYS:
|
||||
if (!(ka->sa.sa_flags & SA_RESTART))
|
||||
goto no_system_call_restart;
|
||||
/* fallthrough */
|
||||
case -ERESTARTNOINTR:
|
||||
regs->regs[0] = save_r0;
|
||||
regs->pc -= instruction_size(
|
||||
ctrl_inw(regs->pc - 4));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set up the stack frame */
|
||||
if (ka->sa.sa_flags & SA_SIGINFO)
|
||||
@@ -580,6 +590,9 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
|
||||
|
||||
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
|
||||
if (signr > 0) {
|
||||
if (regs->sr & 1)
|
||||
handle_syscall_restart(save_r0, regs, &ka.sa);
|
||||
|
||||
/* Whee! Actually deliver the signal. */
|
||||
if (handle_signal(signr, &ka, &info, oldset,
|
||||
regs, save_r0) == 0) {
|
||||
|
@@ -43,6 +43,10 @@
|
||||
|
||||
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
|
||||
|
||||
static void
|
||||
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
|
||||
sigset_t *oldset, struct pt_regs * regs);
|
||||
|
||||
/*
|
||||
* Note that 'init' is a special process: it doesn't get signals it doesn't
|
||||
* want to handle. Thus you cannot kill init even with a SIGKILL even by
|
||||
@@ -371,6 +375,9 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
|
||||
sigset_t set;
|
||||
long long ret;
|
||||
|
||||
/* Always make any pending restarted system calls return -EINTR */
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
||||
goto badframe;
|
||||
|
||||
@@ -408,6 +415,9 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
|
||||
stack_t __user st;
|
||||
long long ret;
|
||||
|
||||
/* Always make any pending restarted system calls return -EINTR */
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
||||
goto badframe;
|
||||
|
||||
@@ -535,7 +545,7 @@ static void setup_frame(int sig, struct k_sigaction *ka,
|
||||
* On SH5 all edited pointers are subject to NEFF
|
||||
*/
|
||||
DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
|
||||
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
|
||||
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
|
||||
} else {
|
||||
/*
|
||||
* Different approach on SH5.
|
||||
@@ -550,10 +560,10 @@ static void setup_frame(int sig, struct k_sigaction *ka,
|
||||
*/
|
||||
DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
|
||||
DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
|
||||
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
|
||||
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
|
||||
|
||||
if (__copy_to_user(frame->retcode,
|
||||
(unsigned long long)sa_default_restorer & (~1), 16) != 0)
|
||||
(void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
|
||||
goto give_sigsegv;
|
||||
|
||||
/* Cohere the trampoline with the I-cache. */
|
||||
@@ -566,7 +576,7 @@ static void setup_frame(int sig, struct k_sigaction *ka,
|
||||
*/
|
||||
regs->regs[REG_SP] = (unsigned long) frame;
|
||||
regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
|
||||
(regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
|
||||
(regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
|
||||
regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
|
||||
|
||||
/* FIXME:
|
||||
@@ -652,7 +662,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
||||
* On SH5 all edited pointers are subject to NEFF
|
||||
*/
|
||||
DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
|
||||
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
|
||||
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
|
||||
} else {
|
||||
/*
|
||||
* Different approach on SH5.
|
||||
@@ -668,10 +678,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
||||
|
||||
DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
|
||||
DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
|
||||
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
|
||||
(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
|
||||
|
||||
if (__copy_to_user(frame->retcode,
|
||||
(unsigned long long)sa_default_rt_restorer & (~1), 16) != 0)
|
||||
(void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
|
||||
goto give_sigsegv;
|
||||
|
||||
flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
|
||||
@@ -683,7 +693,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
||||
*/
|
||||
regs->regs[REG_SP] = (unsigned long) frame;
|
||||
regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
|
||||
(regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
|
||||
(regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
|
||||
regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
|
||||
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
|
||||
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
|
||||
|
@@ -3,7 +3,7 @@
|
||||
*
|
||||
* SMP support for the SuperH processors.
|
||||
*
|
||||
* Copyright (C) 2002 - 2007 Paul Mundt
|
||||
* Copyright (C) 2002 - 2008 Paul Mundt
|
||||
* Copyright (C) 2006 - 2007 Akio Idehara
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
@@ -86,9 +86,12 @@ asmlinkage void __cpuinit start_secondary(void)
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
cpu = smp_processor_id();
|
||||
|
||||
/* Enable local timers */
|
||||
local_timer_setup(cpu);
|
||||
calibrate_delay();
|
||||
|
||||
cpu = smp_processor_id();
|
||||
smp_store_cpu_info(cpu);
|
||||
|
||||
cpu_set(cpu, cpu_online_map);
|
||||
@@ -186,6 +189,42 @@ void arch_send_call_function_single_ipi(int cpu)
|
||||
plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
|
||||
}
|
||||
|
||||
void smp_timer_broadcast(cpumask_t mask)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for_each_cpu_mask(cpu, mask)
|
||||
plat_send_ipi(cpu, SMP_MSG_TIMER);
|
||||
}
|
||||
|
||||
static void ipi_timer(void)
|
||||
{
|
||||
irq_enter();
|
||||
local_timer_interrupt();
|
||||
irq_exit();
|
||||
}
|
||||
|
||||
void smp_message_recv(unsigned int msg)
|
||||
{
|
||||
switch (msg) {
|
||||
case SMP_MSG_FUNCTION:
|
||||
generic_smp_call_function_interrupt();
|
||||
break;
|
||||
case SMP_MSG_RESCHEDULE:
|
||||
break;
|
||||
case SMP_MSG_FUNCTION_SINGLE:
|
||||
generic_smp_call_function_single_interrupt();
|
||||
break;
|
||||
case SMP_MSG_TIMER:
|
||||
ipi_timer();
|
||||
break;
|
||||
default:
|
||||
printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
|
||||
smp_processor_id(), __func__, msg);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Not really SMP stuff ... */
|
||||
int setup_profiling_timer(unsigned int multiplier)
|
||||
{
|
||||
|
@@ -3,7 +3,7 @@
|
||||
*
|
||||
* Stack trace management functions
|
||||
*
|
||||
* Copyright (C) 2006 Paul Mundt
|
||||
* Copyright (C) 2006 - 2008 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
@@ -36,3 +36,24 @@ void save_stack_trace(struct stack_trace *trace)
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(save_stack_trace);
|
||||
|
||||
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
|
||||
{
|
||||
unsigned long *sp = (unsigned long *)tsk->thread.sp;
|
||||
|
||||
while (!kstack_end(sp)) {
|
||||
unsigned long addr = *sp++;
|
||||
|
||||
if (__kernel_text_address(addr)) {
|
||||
if (in_sched_functions(addr))
|
||||
break;
|
||||
if (trace->skip > 0)
|
||||
trace->skip--;
|
||||
else
|
||||
trace->entries[trace->nr_entries++] = addr;
|
||||
if (trace->nr_entries >= trace->max_entries)
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
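
/*
 * Illustrative sketch, not part of the diff: a caller of the new
 * save_stack_trace_tsk() through the generic stacktrace API. The buffer size
 * and output format are arbitrary example choices.
 */
#include <linux/stacktrace.h>
#include <linux/sched.h>
#include <linux/kernel.h>

static void example_dump_task_stack(struct task_struct *tsk)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};
	unsigned int i;

	save_stack_trace_tsk(tsk, &trace);
	for (i = 0; i < trace.nr_entries; i++)
		printk(KERN_DEBUG "  [<%08lx>]\n", entries[i]);
}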
|
||||
|
@@ -23,6 +23,7 @@
|
||||
#include <linux/fs.h>
|
||||
#include <linux/ipc.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/syscalls.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/unistd.h>
|
||||
|
||||
@@ -170,6 +171,8 @@ asmlinkage int sys_ipc(uint call, int first, int second,
|
||||
version = call >> 16; /* hack for backward compatibility */
|
||||
call &= 0xffff;
|
||||
|
||||
trace_mark(kernel_arch_ipc_call, "call %u first %d", call, first);
|
||||
|
||||
if (call <= SEMTIMEDOP)
|
||||
switch (call) {
|
||||
case SEMOP:
|
||||
@@ -186,7 +189,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
|
||||
union semun fourth;
|
||||
if (!ptr)
|
||||
return -EINVAL;
|
||||
if (get_user(fourth.__pad, (void * __user *) ptr))
|
||||
if (get_user(fourth.__pad, (void __user * __user *) ptr))
|
||||
return -EFAULT;
|
||||
return sys_semctl (first, second, third, fourth);
|
||||
}
|
||||
@@ -261,13 +264,13 @@ asmlinkage int sys_ipc(uint call, int first, int second,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
asmlinkage int sys_uname(struct old_utsname * name)
|
||||
asmlinkage int sys_uname(struct old_utsname __user *name)
|
||||
{
|
||||
int err;
|
||||
if (!name)
|
||||
return -EFAULT;
|
||||
down_read(&uts_sem);
|
||||
err = copy_to_user(name, utsname(), sizeof (*name));
|
||||
err = copy_to_user(name, utsname(), sizeof(*name));
|
||||
up_read(&uts_sem);
|
||||
return err?-EFAULT:0;
|
||||
}
|
||||
|
@@ -16,6 +16,7 @@
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/unistd.h>
|
||||
#include <asm/syscalls.h>
|
||||
|
||||
/*
|
||||
* sys_pipe() is the normal C calling standard for creating
|
||||
@@ -37,13 +38,13 @@ asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
|
||||
return error;
|
||||
}
|
||||
|
||||
asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
|
||||
asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
|
||||
size_t count, long dummy, loff_t pos)
|
||||
{
|
||||
return sys_pread64(fd, buf, count, pos);
|
||||
}
|
||||
|
||||
asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
|
||||
asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
|
||||
size_t count, long dummy, loff_t pos)
|
||||
{
|
||||
return sys_pwrite64(fd, buf, count, pos);
|
||||
|
@@ -1,9 +1,9 @@
|
||||
/*
|
||||
* arch/sh/kernel/time.c
|
||||
* arch/sh/kernel/time_32.c
|
||||
*
|
||||
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
|
||||
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
|
||||
* Copyright (C) 2002 - 2007 Paul Mundt
|
||||
* Copyright (C) 2002 - 2008 Paul Mundt
|
||||
* Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
|
||||
*
|
||||
* Some code taken from i386 version.
|
||||
@@ -16,6 +16,8 @@
|
||||
#include <linux/timex.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/mc146818rtc.h> /* for rtc_lock */
|
||||
#include <linux/smp.h>
|
||||
#include <asm/clock.h>
|
||||
#include <asm/rtc.h>
|
||||
#include <asm/timer.h>
|
||||
@@ -253,6 +255,10 @@ void __init time_init(void)
|
||||
set_normalized_timespec(&wall_to_monotonic,
|
||||
-xtime.tv_sec, -xtime.tv_nsec);
|
||||
|
||||
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
|
||||
local_timer_setup(smp_processor_id());
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Find the timer to use as the system timer, it will be
|
||||
* initialized for us.
|
||||
@@ -260,6 +266,7 @@ void __init time_init(void)
|
||||
sys_timer = get_sys_timer();
|
||||
printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
|
||||
|
||||
|
||||
if (sys_timer->ops->read)
|
||||
clocksource_sh.read = sys_timer->ops->read;
|
||||
|
||||
|
@@ -39,6 +39,7 @@
|
||||
#include <asm/processor.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/delay.h>
|
||||
#include <asm/clock.h>
|
||||
|
||||
#define TMU_TOCR_INIT 0x00
|
||||
#define TMU0_TCR_INIT 0x0020
|
||||
@@ -51,14 +52,6 @@
|
||||
#define RTC_RCR1_CIE 0x10 /* Carry Interrupt Enable */
|
||||
#define RTC_RCR1 (rtc_base + 0x38)
|
||||
|
||||
/* Clock, Power and Reset Controller */
|
||||
#define CPRC_BLOCK_OFF 0x01010000
|
||||
#define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
|
||||
|
||||
#define FRQCR (cprc_base+0x0)
|
||||
#define WTCSR (cprc_base+0x0018)
|
||||
#define STBCR (cprc_base+0x0030)
|
||||
|
||||
/* Time Management Unit */
|
||||
#define TMU_BLOCK_OFF 0x01020000
|
||||
#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
|
||||
@@ -293,103 +286,17 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}

static __init unsigned int get_cpu_hz(void)
{
unsigned int count;
unsigned long __dummy;
unsigned long ctc_val_init, ctc_val;

/*
** Regardless the toolchain, force the compiler to use the
** arbitrary register r3 as a clock tick counter.
** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
*/
register unsigned long long __rtc_irq_flag __asm__ ("r3");

local_irq_enable();
do {} while (ctrl_inb(rtc_base) != 0);
ctrl_outb(RTC_RCR1_CIE, RTC_RCR1); /* Enable carry interrupt */

/*
* r3 is arbitrary. CDC does not support "=z".
*/
ctc_val_init = 0xffffffff;
ctc_val = ctc_val_init;

asm volatile("gettr tr0, %1\n\t"
"putcon %0, " __CTC "\n\t"
"and %2, r63, %2\n\t"
"pta $+4, tr0\n\t"
"beq/l %2, r63, tr0\n\t"
"ptabs %1, tr0\n\t"
"getcon " __CTC ", %0\n\t"
: "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
: "0" (0));
local_irq_disable();
/*
* SH-3:
* CPU clock = 4 stages * loop
* tst rm,rm if id ex
* bt/s 1b if id ex
* add #1,rd if id ex
* (if) pipe line stole
* tst rm,rm if id ex
* ....
*
*
* SH-4:
* CPU clock = 6 stages * loop
* I don't know why.
* ....
*
* SH-5:
* Use CTC register to count. This approach returns the right value
* even if the I-cache is disabled (e.g. whilst debugging.)
*
*/

count = ctc_val_init - ctc_val; /* CTC counts down */

/*
* This really is count by the number of clock cycles
* by the ratio between a complete R64CNT
* wrap-around (128) and CUI interrupt being raised (64).
*/
return count*2;
}
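
The arithmetic at the end of get_cpu_hz() is simple enough to check in isolation: the CTC counts down while the code waits for one RTC carry interrupt, which covers half of an R64CNT wrap, so the cycle delta is doubled to get cycles per second. A standalone sketch of that calculation (the readings are made up for illustration):

#include <stdio.h>

/*
 * Cycles elapsed on a down-counting 32-bit counter, doubled because
 * the measurement window (one carry interrupt) spans half an R64CNT wrap.
 */
static unsigned int cpu_hz_from_ctc(unsigned int ctc_val_init, unsigned int ctc_val)
{
	unsigned int count = ctc_val_init - ctc_val;	/* CTC counts down */
	return count * 2;
}

int main(void)
{
	/* Hypothetical readings: counter started at 0xffffffff. */
	unsigned int hz = cpu_hz_from_ctc(0xffffffffu, 0xffffffffu - 137500000u);
	printf("estimated CPU clock: %u Hz\n", hz);	/* 275000000 */
	return 0;
}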

static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
{
struct pt_regs *regs = get_irq_regs();

ctrl_outb(0, RTC_RCR1); /* Disable Carry Interrupts */
regs->regs[3] = 1; /* Using r3 */

return IRQ_HANDLED;
}

static struct irqaction irq0 = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED,
.mask = CPU_MASK_NONE,
.name = "timer",
};
static struct irqaction irq1 = {
.handler = sh64_rtc_interrupt,
.flags = IRQF_DISABLED,
.mask = CPU_MASK_NONE,
.name = "rtc",
};

void __init time_init(void)
{
unsigned int cpu_clock, master_clock, bus_clock, module_clock;
unsigned long interval;
unsigned long frqcr, ifc, pfc;
static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
#define bfc_table ifc_table /* Same */
#define pfc_table ifc_table /* Same */
struct clk *clk;

tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
if (!tmu_base) {
@@ -401,50 +308,19 @@ void __init time_init(void)
panic("Unable to remap RTC\n");
}

cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
if (!cprc_base) {
panic("Unable to remap CPRC\n");
}
clk = clk_get(NULL, "cpu_clk");
scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) /
(unsigned long long)(clk_get_rate(clk) / HZ));

rtc_sh_get_time(&xtime);

setup_irq(TIMER_IRQ, &irq0);
setup_irq(RTC_IRQ, &irq1);

/* Check how fast it is.. */
cpu_clock = get_cpu_hz();

/* Note careful order of operations to maintain reasonable precision and avoid overflow. */
scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));

free_irq(RTC_IRQ, NULL);

printk("CPU clock: %d.%02dMHz\n",
(cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
{
unsigned short bfc;
frqcr = ctrl_inl(FRQCR);
ifc = ifc_table[(frqcr>> 6) & 0x0007];
bfc = bfc_table[(frqcr>> 3) & 0x0007];
pfc = pfc_table[(frqcr>> 12) & 0x0007];
master_clock = cpu_clock * ifc;
bus_clock = master_clock/bfc;
}
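
The block above recovers the clock tree from the measured CPU clock and the FRQCR divider fields (the module clock is derived the same way a few lines further down). A standalone sketch of that table lookup and arithmetic, with a made-up FRQCR value:

#include <stdio.h>

/* Divider table shared by the IFC/BFC/PFC fields, mirroring the code above. */
static const int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };

int main(void)
{
	unsigned long frqcr = 0x0;		/* hypothetical FRQCR reading */
	unsigned int cpu_clock = 250000000;	/* e.g. result of get_cpu_hz() */

	int ifc = ifc_table[(frqcr >> 6) & 0x0007];
	int bfc = ifc_table[(frqcr >> 3) & 0x0007];
	int pfc = ifc_table[(frqcr >> 12) & 0x0007];

	unsigned int master_clock = cpu_clock * ifc;
	unsigned int bus_clock = master_clock / bfc;
	unsigned int module_clock = master_clock / pfc;

	printf("master %u Hz, bus %u Hz, module %u Hz\n",
	       master_clock, bus_clock, module_clock);
	return 0;
}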

printk("Bus clock: %d.%02dMHz\n",
(bus_clock/1000000), (bus_clock % 1000000)/10000);
module_clock = master_clock/pfc;
printk("Module clock: %d.%02dMHz\n",
(module_clock/1000000), (module_clock % 1000000)/10000);
interval = (module_clock/(HZ*4));
clk = clk_get(NULL, "module_clk");
interval = (clk_get_rate(clk)/(HZ*4));

printk("Interval = %ld\n", interval);

current_cpu_data.cpu_clock = cpu_clock;
current_cpu_data.master_clock = master_clock;
current_cpu_data.bus_clock = bus_clock;
current_cpu_data.module_clock = module_clock;

/* Start TMU0 */
ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
@@ -454,36 +330,6 @@ void __init time_init(void)
ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
}

void enter_deep_standby(void)
{
/* Disable watchdog timer */
ctrl_outl(0xa5000000, WTCSR);
/* Configure deep standby on sleep */
ctrl_outl(0x03, STBCR);

#ifdef CONFIG_SH_ALPHANUMERIC
{
extern void mach_alphanum(int position, unsigned char value);
extern void mach_alphanum_brightness(int setting);
char halted[] = "Halted. ";
int i;
mach_alphanum_brightness(6); /* dimmest setting above off */
for (i=0; i<8; i++) {
mach_alphanum(i, halted[i]);
}
asm __volatile__ ("synco");
}
#endif

asm __volatile__ ("sleep");
asm __volatile__ ("synci");
asm __volatile__ ("nop");
asm __volatile__ ("nop");
asm __volatile__ ("nop");
asm __volatile__ ("nop");
panic("Unexpected wakeup!\n");
}

static struct resource rtc_resources[] = {
[0] = {
/* RTC base, filled in by rtc_init */
@@ -8,3 +8,4 @@ obj-$(CONFIG_SH_TMU) += timer-tmu.o
obj-$(CONFIG_SH_MTU2) += timer-mtu2.o
obj-$(CONFIG_SH_CMT) += timer-cmt.o

obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += timer-broadcast.o
arch/sh/kernel/timers/timer-broadcast.c (new file, 57 lines)
@@ -0,0 +1,57 @@
/*
* Dummy local timer
*
* Copyright (C) 2008 Paul Mundt
*
* cloned from:
*
* linux/arch/arm/mach-realview/localtimer.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/irq.h>

static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);

/*
* Used on SMP for either the local timer or SMP_MSG_TIMER
*/
void local_timer_interrupt(void)
{
struct clock_event_device *clk = &__get_cpu_var(local_clockevent);

clk->event_handler(clk);
}

static void dummy_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *clk)
{
}

void __cpuinit local_timer_setup(unsigned int cpu)
{
struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);

clk->name = "dummy_timer";
clk->features = CLOCK_EVT_FEAT_DUMMY;
clk->rating = 200;
clk->mult = 1;
clk->set_mode = dummy_timer_set_mode;
clk->broadcast = smp_timer_broadcast;
clk->cpumask = cpumask_of_cpu(cpu);

clockevents_register_device(clk);
}
@@ -174,7 +174,7 @@ static int cmt_timer_init(void)
return 0;
}

struct sys_timer_ops cmt_timer_ops = {
static struct sys_timer_ops cmt_timer_ops = {
.init = cmt_timer_init,
.start = cmt_timer_start,
.stop = cmt_timer_stop,
@@ -28,43 +28,90 @@
#define TMU_TOCR_INIT 0x00
#define TMU_TCR_INIT 0x0020

#define TMU0 (0)
#define TMU1 (1)

static inline void _tmu_start(int tmu_num)
{
ctrl_outb(ctrl_inb(TMU_012_TSTR) | (0x1<<tmu_num), TMU_012_TSTR);
}

static inline void _tmu_set_irq(int tmu_num, int enabled)
{
register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
ctrl_outw( (enabled ? ctrl_inw(tmu_tcr) | (1<<5) : ctrl_inw(tmu_tcr) & ~(1<<5)), tmu_tcr);
}

static inline void _tmu_stop(int tmu_num)
{
ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~(0x1<<tmu_num), TMU_012_TSTR);
}

static inline void _tmu_clear_status(int tmu_num)
{
register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
/* Clear UNF bit */
ctrl_outw(ctrl_inw(tmu_tcr) & ~0x100, tmu_tcr);
}

static inline unsigned long _tmu_read(int tmu_num)
{
return ctrl_inl(TMU0_TCNT+0xC*tmu_num);
}

static int tmu_timer_start(void)
{
ctrl_outb(ctrl_inb(TMU_012_TSTR) | 0x3, TMU_012_TSTR);
_tmu_start(TMU0);
_tmu_start(TMU1);
_tmu_set_irq(TMU0,1);
return 0;
}

static void tmu0_timer_set_interval(unsigned long interval, unsigned int reload)
static int tmu_timer_stop(void)
{
ctrl_outl(interval, TMU0_TCNT);
_tmu_stop(TMU0);
_tmu_stop(TMU1);
_tmu_clear_status(TMU0);
return 0;
}

/*
* also when the module_clk is scaled the TMU1
* will show the same frequency
*/
static int tmus_are_scaled;

static cycle_t tmu_timer_read(void)
{
return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled;
}


static unsigned long tmu_latest_interval[3];
static void tmu_timer_set_interval(int tmu_num, unsigned long interval, unsigned int reload)
{
unsigned long tmu_tcnt = TMU0_TCNT + tmu_num*0xC;
unsigned long tmu_tcor = TMU0_TCOR + tmu_num*0xC;

_tmu_stop(tmu_num);

ctrl_outl(interval, tmu_tcnt);
tmu_latest_interval[tmu_num] = interval;

/*
* TCNT reloads from TCOR on underflow, clear it if we don't
* intend to auto-reload
*/
if (reload)
ctrl_outl(interval, TMU0_TCOR);
else
ctrl_outl(0, TMU0_TCOR);
ctrl_outl( reload ? interval : 0 , tmu_tcor);

tmu_timer_start();
}
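
The TCOR handling above is the whole periodic/one-shot distinction: on underflow TCNT reloads from TCOR, so writing the interval there gives a periodic timer and writing 0 gives a one-shot. A small userspace simulation of that behaviour (illustrative only; the register names are plain variables here, and the one-shot channel is simply stopped after it fires, as the driver would re-arm it explicitly):

#include <stdio.h>

/* Simulated TMU channel: TCNT counts down, reloads from TCOR on underflow. */
struct tmu_sim {
	unsigned long tcnt;
	unsigned long tcor;
	int running;
	int underflows;
};

static void tmu_sim_program(struct tmu_sim *t, unsigned long interval, int reload)
{
	t->tcnt = interval;
	t->tcor = reload ? interval : 0;	/* 0 => effectively one-shot */
	t->running = 1;
	t->underflows = 0;
}

static void tmu_sim_tick(struct tmu_sim *t)
{
	if (!t->running)
		return;
	if (t->tcnt == 0) {
		t->underflows++;
		if (t->tcor)
			t->tcnt = t->tcor;	/* periodic: reload and keep counting */
		else
			t->running = 0;		/* one-shot: wait to be re-armed */
		return;
	}
	t->tcnt--;
}

int main(void)
{
	struct tmu_sim periodic, oneshot;
	int i;

	tmu_sim_program(&periodic, 3, 1);
	tmu_sim_program(&oneshot, 3, 0);

	for (i = 0; i < 20; i++) {
		tmu_sim_tick(&periodic);
		tmu_sim_tick(&oneshot);
	}
	printf("periodic underflows: %d, one-shot underflows: %d\n",
	       periodic.underflows, oneshot.underflows);	/* 5, 1 */
	return 0;
}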

static int tmu_timer_stop(void)
{
ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~0x3, TMU_012_TSTR);
return 0;
}

static cycle_t tmu_timer_read(void)
{
return ~ctrl_inl(TMU1_TCNT);
_tmu_start(tmu_num);
}

static int tmu_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
tmu0_timer_set_interval(cycles, 1);
tmu_timer_set_interval(TMU0,cycles, evt->mode == CLOCK_EVT_MODE_PERIODIC);
_tmu_set_irq(TMU0,1);
return 0;
}

@@ -96,12 +143,8 @@ static struct clock_event_device tmu0_clockevent = {
static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
{
struct clock_event_device *evt = &tmu0_clockevent;
unsigned long timer_status;

/* Clear UNF bit */
timer_status = ctrl_inw(TMU0_TCR);
timer_status &= ~0x100;
ctrl_outw(timer_status, TMU0_TCR);
_tmu_clear_status(TMU0);
_tmu_set_irq(TMU0,tmu0_clockevent.mode != CLOCK_EVT_MODE_ONESHOT);

evt->event_handler(evt);

@@ -109,56 +152,73 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
}

static struct irqaction tmu0_irq = {
.name = "periodic timer",
.name = "periodic/oneshot timer",
.handler = tmu_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.mask = CPU_MASK_NONE,
};

static void tmu0_clk_init(struct clk *clk)
static void __init tmu_clk_init(struct clk *clk)
{
u8 divisor = TMU_TCR_INIT & 0x7;
ctrl_outw(TMU_TCR_INIT, TMU0_TCR);
clk->rate = clk->parent->rate / (4 << (divisor << 1));
u8 divisor = TMU_TCR_INIT & 0x7;
int tmu_num = clk->name[3]-'0';
ctrl_outw(TMU_TCR_INIT, TMU0_TCR+(tmu_num*0xC));
clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));
}

static void tmu0_clk_recalc(struct clk *clk)
static void tmu_clk_recalc(struct clk *clk)
{
u8 divisor = ctrl_inw(TMU0_TCR) & 0x7;
clk->rate = clk->parent->rate / (4 << (divisor << 1));
int tmu_num = clk->name[3]-'0';
unsigned long prev_rate = clk_get_rate(clk);
unsigned long flags;
u8 divisor = ctrl_inw(TMU0_TCR+tmu_num*0xC) & 0x7;
clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));

if(prev_rate==clk_get_rate(clk))
return;

if(tmu_num)
return; /* No more work on TMU1 */

local_irq_save(flags);
tmus_are_scaled = (prev_rate > clk->rate);

_tmu_stop(TMU0);

tmu0_clockevent.mult = div_sc(clk->rate, NSEC_PER_SEC,
tmu0_clockevent.shift);
tmu0_clockevent.max_delta_ns =
clockevent_delta2ns(-1, &tmu0_clockevent);
tmu0_clockevent.min_delta_ns =
clockevent_delta2ns(1, &tmu0_clockevent);

if (tmus_are_scaled)
tmu_latest_interval[TMU0] >>= 1;
else
tmu_latest_interval[TMU0] <<= 1;

tmu_timer_set_interval(TMU0,
tmu_latest_interval[TMU0],
tmu0_clockevent.mode == CLOCK_EVT_MODE_PERIODIC);

_tmu_start(TMU0);

local_irq_restore(flags);
}
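
The recalc path above reprograms the clockevent scaling factors whenever the TMU input clock changes. Under the usual definitions (mult = div_sc(rate, NSEC_PER_SEC, shift), i.e. (rate << shift) / NSEC_PER_SEC, and delta2ns(cycles) roughly (cycles << shift) / mult, ignoring the kernel's rounding and clamping), the arithmetic can be checked standalone; a rough sketch with an assumed shift of 32 and an assumed 8.25 MHz timer clock:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* mult such that cycles = (ns * mult) >> shift */
static uint64_t calc_mult(uint64_t rate, unsigned shift)
{
	return (rate << shift) / NSEC_PER_SEC;
}

/* inverse conversion: how many ns a given number of timer cycles spans */
static uint64_t delta2ns(uint64_t cycles, uint64_t mult, unsigned shift)
{
	return (cycles << shift) / mult;
}

int main(void)
{
	unsigned shift = 32;		/* assumed clockevent shift */
	uint64_t rate = 8250000;	/* assumed TMU rate in Hz */
	uint64_t mult = calc_mult(rate, shift);

	printf("mult      = %llu\n", (unsigned long long)mult);
	printf("min delta = %llu ns\n",
	       (unsigned long long)delta2ns(1, mult, shift));
	printf("max delta = %llu ns\n",
	       (unsigned long long)delta2ns(0xffffffffULL, mult, shift));
	return 0;
}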

static struct clk_ops tmu0_clk_ops = {
.init = tmu0_clk_init,
.recalc = tmu0_clk_recalc,
static struct clk_ops tmu_clk_ops = {
.init = tmu_clk_init,
.recalc = tmu_clk_recalc,
};

static struct clk tmu0_clk = {
.name = "tmu0_clk",
.ops = &tmu0_clk_ops,
};

static void tmu1_clk_init(struct clk *clk)
{
u8 divisor = TMU_TCR_INIT & 0x7;
ctrl_outw(divisor, TMU1_TCR);
clk->rate = clk->parent->rate / (4 << (divisor << 1));
}

static void tmu1_clk_recalc(struct clk *clk)
{
u8 divisor = ctrl_inw(TMU1_TCR) & 0x7;
clk->rate = clk->parent->rate / (4 << (divisor << 1));
}

static struct clk_ops tmu1_clk_ops = {
.init = tmu1_clk_init,
.recalc = tmu1_clk_recalc,
.ops = &tmu_clk_ops,
};

static struct clk tmu1_clk = {
.name = "tmu1_clk",
.ops = &tmu1_clk_ops,
.ops = &tmu_clk_ops,
};

static int tmu_timer_init(void)
@@ -189,11 +249,12 @@ static int tmu_timer_init(void)
frequency = clk_get_rate(&tmu0_clk);
interval = (frequency + HZ / 2) / HZ;

sh_hpt_frequency = clk_get_rate(&tmu1_clk);
ctrl_outl(~0, TMU1_TCNT);
ctrl_outl(~0, TMU1_TCOR);
tmu_timer_set_interval(TMU0,interval, 1);
tmu_timer_set_interval(TMU1,~0,1);

tmu0_timer_set_interval(interval, 1);
_tmu_start(TMU1);

sh_hpt_frequency = clk_get_rate(&tmu1_clk);

tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
tmu0_clockevent.shift);
@@ -26,6 +26,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>

#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
@@ -192,6 +193,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
int ret, index, count;
unsigned long *rm, *rn;
unsigned char *src, *dst;
unsigned char __user *srcu, *dstu;

index = (instruction>>8)&15; /* 0x0F00 */
rn = &regs->regs[index];
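
For context, SH opcodes are 16 bits wide and the handler pulls the register operands straight out of fixed bit fields: Rn lives in bits 8-11 (the 0x0F00 mask above) and Rm in bits 4-7 for the forms handled here. A standalone decoding sketch with a made-up opcode value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t instruction = 0x6143;	/* hypothetical mov-type opcode */

	int rn = (instruction >> 8) & 15;	/* 0x0F00 field */
	int rm = (instruction >> 4) & 15;	/* 0x00F0 field */
	int low = instruction & 15;		/* low nibble; a displacement in the @(disp,...) forms */

	printf("Rn=r%d Rm=r%d low=%d\n", rn, rm, low);	/* Rn=r1 Rm=r4 low=3 */
	return 0;
}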

@@ -206,28 +208,28 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
case 0: /* mov.[bwl] to/from memory via r0+rn */
if (instruction & 8) {
/* from memory */
src = (unsigned char*) *rm;
src += regs->regs[0];
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
srcu = (unsigned char __user *)*rm;
srcu += regs->regs[0];
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, src, count))
if (ma->from(dst, srcu, count))
goto fetch_fault;

sign_extend(count, dst);
} else {
/* to memory */
src = (unsigned char*) rm;
src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
dst = (unsigned char*) *rn;
dst += regs->regs[0];
dstu = (unsigned char __user *)*rn;
dstu += regs->regs[0];

if (ma->to(dst, src, count))
if (ma->to(dstu, src, count))
goto fetch_fault;
}
ret = 0;
@@ -235,10 +237,10 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,

case 1: /* mov.l Rm,@(disp,Rn) */
src = (unsigned char*) rm;
dst = (unsigned char*) *rn;
dst += (instruction&0x000F)<<2;
dstu = (unsigned char __user *)*rn;
dstu += (instruction&0x000F)<<2;

if (ma->to(dst, src, 4))
if (ma->to(dstu, src, 4))
goto fetch_fault;
ret = 0;
break;
@@ -247,28 +249,28 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
if (instruction & 4)
*rn -= count;
src = (unsigned char*) rm;
dst = (unsigned char*) *rn;
dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
if (ma->to(dst, src, count))
if (ma->to(dstu, src, count))
goto fetch_fault;
ret = 0;
break;

case 5: /* mov.l @(disp,Rm),Rn */
src = (unsigned char*) *rm;
src += (instruction&0x000F)<<2;
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 2;
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;

if (ma->from(dst, src, 4))
if (ma->from(dst, srcu, 4))
goto fetch_fault;
ret = 0;
break;

case 6: /* mov.[bwl] from memory, possibly with post-increment */
src = (unsigned char*) *rm;
srcu = (unsigned char __user *)*rm;
if (instruction & 4)
*rm += count;
dst = (unsigned char*) rn;
@@ -277,7 +279,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, src, count))
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
ret = 0;
@@ -286,28 +288,28 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
case 8:
switch ((instruction&0xFF00)>>8) {
case 0x81: /* mov.w R0,@(disp,Rn) */
src = (unsigned char*) &regs->regs[0];
src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
src += 2;
#endif
dst = (unsigned char*) *rm; /* called Rn in the spec */
dst += (instruction&0x000F)<<1;
dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
dstu += (instruction & 0x000F) << 1;

if (ma->to(dst, src, 2))
if (ma->to(dstu, src, 2))
goto fetch_fault;
ret = 0;
break;

case 0x85: /* mov.w @(disp,Rm),R0 */
src = (unsigned char*) *rm;
src += (instruction&0x000F)<<1;
dst = (unsigned char*) &regs->regs[0];
*(unsigned long*)dst = 0;
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 1;
dst = (unsigned char *) &regs->regs[0];
*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
dst += 2;
#endif
if (ma->from(dst, src, 2))
if (ma->from(dst, srcu, 2))
goto fetch_fault;
sign_extend(2, dst);
ret = 0;
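
The mov.w emulation above shows the general pattern: the destination long is zeroed, the bytes are copied in one at a time, and sign_extend() then widens the 16-bit value. A standalone sketch of that assemble-and-sign-extend step for a 2-byte load (illustrative only; the endianness handling is reduced to the little-endian case):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Widen the low 2 bytes of *dst in place (2-byte case only here). */
static void sign_extend_2(unsigned long *dst)
{
	int16_t w;

	memcpy(&w, dst, sizeof(w));	/* low 16 bits, little-endian layout */
	*dst = (unsigned long)(long)w;
}

int main(void)
{
	unsigned char unaligned_src[3] = { 0x00, 0xfe, 0xff };	/* 0xfffe at an odd offset */
	unsigned long dst = 0;

	memcpy(&dst, unaligned_src + 1, 2);	/* byte-wise copy avoids the unaligned load */
	sign_extend_2(&dst);

	printf("value = %ld\n", (long)dst);	/* -2 */
	return 0;
}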

@@ -333,7 +335,8 @@ static inline int handle_delayslot(struct pt_regs *regs,
struct mem_access *ma)
{
opcode_t instruction;
void *addr = (void *)(regs->pc + instruction_size(old_instruction));
void __user *addr = (void __user *)(regs->pc +
instruction_size(old_instruction));

if (copy_from_user(&instruction, addr, sizeof(instruction))) {
/* the instruction-fetch faulted */
@@ -511,14 +514,6 @@ int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
return ret;
}

#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector(x) \
__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
#else
#define lookup_exception_vector(x) \
__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
#endif

/*
* Handle various address error exceptions:
* - instruction address error:
@@ -542,7 +537,7 @@ asmlinkage void do_address_error(struct pt_regs *regs,

/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
lookup_exception_vector(error_code);
error_code = lookup_exception_vector();
#endif

oldfs = get_fs();
@@ -559,7 +554,7 @@ asmlinkage void do_address_error(struct pt_regs *regs,
}

set_fs(USER_DS);
if (copy_from_user(&instruction, (void *)(regs->pc),
if (copy_from_user(&instruction, (void __user *)(regs->pc),
sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen non-SMP
@@ -589,7 +584,7 @@ uspace_segv:
die("unaligned program counter", regs, error_code);

set_fs(KERNEL_DS);
if (copy_from_user(&instruction, (void *)(regs->pc),
if (copy_from_user(&instruction, (void __user *)(regs->pc),
sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen non-SMP
@@ -683,7 +678,7 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
}
#endif

lookup_exception_vector(error_code);
error_code = lookup_exception_vector();

local_irq_enable();
CHK_REMOTE_DEBUG(regs);
@@ -739,11 +734,13 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
unsigned long inst;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
unsigned short inst = 0;

if (kprobe_handle_illslot(regs->pc) == 0)
return;

#ifdef CONFIG_SH_FPU_EMU
get_user(inst, (unsigned short *)regs->pc + 1);
if (!do_fpu_inst(inst, regs)) {
get_user(inst, (unsigned short *)regs->pc);
@@ -754,12 +751,12 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
/* not a FPU inst. */
#endif

lookup_exception_vector(error_code);
inst = lookup_exception_vector();

local_irq_enable();
CHK_REMOTE_DEBUG(regs);
force_sig(SIGILL, tsk);
die_if_no_fixup("illegal slot instruction", regs, error_code);
die_if_no_fixup("illegal slot instruction", regs, inst);
}

asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
@@ -769,7 +766,7 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
long ex;

lookup_exception_vector(ex);
ex = lookup_exception_vector();
die_if_kernel("exception", regs, ex);
}