Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
This commit is contained in:
Linus Torvalds
2005-04-16 15:20:36 -07:00
commit 1da177e4c3
17291 changed files with 6718755 additions and 0 deletions

View File

@@ -0,0 +1,16 @@
#
# Makefile for the Linux/SuperH CPU-specific backends.
#
obj-y := irq_ipr.o irq_imask.o init.o bus.o
obj-$(CONFIG_CPU_SH2) += sh2/
obj-$(CONFIG_CPU_SH3) += sh3/
obj-$(CONFIG_CPU_SH4) += sh4/
obj-$(CONFIG_SH_RTC) += rtc.o
obj-$(CONFIG_UBC_WAKEUP) += ubc.o
obj-$(CONFIG_SH_ADC) += adc.o
# Presumably enables the standard kbuild .S assembly rules for this
# directory -- verify against the top-level arch/sh Makefile.
USE_STANDARD_AS_RULE := true

36
arch/sh/kernel/cpu/adc.c Normal file
View File

@@ -0,0 +1,36 @@
/*
* linux/arch/sh/kernel/adc.c -- SH3 on-chip ADC support
*
* Copyright (C) 2004 Andriy Skulysh <askulysh@image.kiev.ua>
*/
#include <linux/module.h>
#include <asm/adc.h>
#include <asm/io.h>
/*
 * adc_single - read one sample from an on-chip A/D converter channel.
 *
 * Busy-waits until the conversion completes, then returns the result
 * (the two data-register bytes combined and shifted down by 6, i.e. a
 * 10-bit value), or -1 if @channel is not in the range 0-7.
 */
int adc_single(unsigned int channel)
{
	int off;
	unsigned char csr;

	if (channel >= 8) return -1;	/* only channels 0-7 exist */

	off = (channel & 0x03) << 2;	/* byte offset into the ADDRA..ADDRD data regs */

	csr = ctrl_inb(ADCSR);
	/*
	 * NOTE(review): the value read above is immediately overwritten by
	 * the assignment below.  The read itself may still be required by
	 * the ADCSR access protocol (read-before-write) -- confirm against
	 * the SH-3 hardware manual before removing it.
	 */
	csr = channel | ADCSR_ADST | ADCSR_CKS;	/* select channel, start conversion */
	ctrl_outb(csr, ADCSR);

	/* Poll until the conversion-finished flag (ADF) is raised. */
	do {
		csr = ctrl_inb(ADCSR);
	} while ((csr & ADCSR_ADF) == 0);

	/* Clear ADF and stop the converter. */
	csr &= ~(ADCSR_ADF | ADCSR_ADST);
	ctrl_outb(csr, ADCSR);

	return (((ctrl_inb(ADDRAH + off) << 8) |
		ctrl_inb(ADDRAL + off)) >> 6);
}
EXPORT_SYMBOL(adc_single);

195
arch/sh/kernel/cpu/bus.c Normal file
View File

@@ -0,0 +1,195 @@
/*
* arch/sh/kernel/cpu/bus.c
*
* Virtual bus for SuperH.
*
* Copyright (C) 2004 Paul Mundt
*
* Shamelessly cloned from arch/arm/mach-omap/bus.c, which was written
* by:
*
* Copyright (C) 2003 - 2004 Nokia Corporation
* Written by Tony Lindgren <tony@atomide.com>
* Portions of code based on sa1111.c.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/bus-sh.h>
/* A device and a driver match when their SH device ids are equal. */
static int sh_bus_match(struct device *dev, struct device_driver *drv)
{
	struct sh_dev *sdev = to_sh_dev(dev);
	struct sh_driver *sdrv = to_sh_driver(drv);

	return sdev->dev_id == sdrv->dev_id;
}
/* Forward a bus-level suspend to the bound driver, if it has a hook. */
static int sh_bus_suspend(struct device *dev, u32 state)
{
	struct sh_driver *sdrv = to_sh_driver(dev->driver);

	if (!sdrv || !sdrv->suspend)
		return 0;

	return sdrv->suspend(to_sh_dev(dev), state);
}
/* Forward a bus-level resume to the bound driver, if it has a hook. */
static int sh_bus_resume(struct device *dev)
{
	struct sh_driver *sdrv = to_sh_driver(dev->driver);

	if (!sdrv || !sdrv->resume)
		return 0;

	return sdrv->resume(to_sh_dev(dev));
}
/*
 * One root device and one bus_type per virtual SH bus.  Only the
 * "virtual" bus is populated here; the arrays are sized SH_NR_BUSES
 * so further buses can be added by extending the initializers.
 */
static struct device sh_bus_devices[SH_NR_BUSES] = {
	{
		.bus_id		= SH_BUS_NAME_VIRT,
	},
};

struct bus_type sh_bus_types[SH_NR_BUSES] = {
	{
		.name		= SH_BUS_NAME_VIRT,
		.match		= sh_bus_match,
		.suspend	= sh_bus_suspend,
		.resume		= sh_bus_resume,
	},
};
/* Bus probe hook: delegate to the driver's probe, or report no device. */
static int sh_device_probe(struct device *dev)
{
	struct sh_driver *sdrv = to_sh_driver(dev->driver);

	if (!sdrv || !sdrv->probe)
		return -ENODEV;

	return sdrv->probe(to_sh_dev(dev));
}
/* Bus remove hook: delegate to the driver's remove when one exists. */
static int sh_device_remove(struct device *dev)
{
	struct sh_driver *sdrv = to_sh_driver(dev->driver);

	if (!sdrv || !sdrv->remove)
		return 0;

	return sdrv->remove(to_sh_dev(dev));
}
/*
 * sh_device_register - add a device to its virtual SH bus.
 *
 * Validates dev->bus_id, wires the driver-model parent and bus
 * pointers, builds the "<name><dev_id>" bus id string, and hands the
 * device to the driver core.  Returns 0 or a negative errno.
 */
int sh_device_register(struct sh_dev *dev)
{
	if (!dev)
		return -EINVAL;

	if (dev->bus_id < 0 || dev->bus_id >= SH_NR_BUSES) {
		printk(KERN_ERR "%s: bus_id invalid: %s bus: %d\n",
		       __FUNCTION__, dev->name, dev->bus_id);
		return -EINVAL;
	}

	dev->dev.parent = &sh_bus_devices[dev->bus_id];
	dev->dev.bus    = &sh_bus_types[dev->bus_id];

	/* This is needed for USB OHCI to work */
	if (dev->dma_mask)
		dev->dev.dma_mask = dev->dma_mask;

	snprintf(dev->dev.bus_id, BUS_ID_SIZE, "%s%u",
		 dev->name, dev->dev_id);

	printk(KERN_INFO "Registering SH device '%s'. Parent at %s\n",
	       dev->dev.bus_id, dev->dev.parent->bus_id);

	return device_register(&dev->dev);
}
/* Remove a device previously added with sh_device_register(). */
void sh_device_unregister(struct sh_dev *dev)
{
	device_unregister(&dev->dev);
}
/*
 * sh_driver_register - register a driver with its virtual SH bus.
 *
 * Validates drv->bus_id, installs the generic probe/remove shims, and
 * registers with the driver core.  Returns 0 or a negative errno.
 */
int sh_driver_register(struct sh_driver *drv)
{
	if (!drv)
		return -EINVAL;

	if (drv->bus_id < 0 || drv->bus_id >= SH_NR_BUSES) {
		printk(KERN_ERR "%s: bus_id invalid: bus: %d device %d\n",
		       __FUNCTION__, drv->bus_id, drv->dev_id);
		return -EINVAL;
	}

	drv->drv.probe  = sh_device_probe;
	drv->drv.remove = sh_device_remove;
	drv->drv.bus    = &sh_bus_types[drv->bus_id];

	return driver_register(&drv->drv);
}
/* Remove a driver previously added with sh_driver_register(). */
void sh_driver_unregister(struct sh_driver *drv)
{
	driver_unregister(&drv->drv);
}
/*
 * sh_bus_init - register the root devices and bus types for every
 * virtual SH bus.
 *
 * Registration continues past a failing bus so that later buses still
 * come up.  The first error encountered is returned; previously the
 * return value only reflected the LAST bus processed, so an early
 * failure followed by a successful bus was reported as success.
 */
static int __init sh_bus_init(void)
{
	int i, ret = 0;

	for (i = 0; i < SH_NR_BUSES; i++) {
		int err = device_register(&sh_bus_devices[i]);
		if (err != 0) {
			printk(KERN_ERR "Unable to register bus device %s\n",
			       sh_bus_devices[i].bus_id);
			if (ret == 0)
				ret = err;
			continue;
		}

		err = bus_register(&sh_bus_types[i]);
		if (err != 0) {
			printk(KERN_ERR "Unable to register bus %s\n",
			       sh_bus_types[i].name);
			/* Roll back the matching root device. */
			device_unregister(&sh_bus_devices[i]);
			if (ret == 0)
				ret = err;
		}
	}

	printk(KERN_INFO "SH Virtual Bus initialized\n");

	return ret;
}
/* Tear down every bus type and root device registered at init time. */
static void __exit sh_bus_exit(void)
{
	int bus = 0;

	while (bus < SH_NR_BUSES) {
		bus_unregister(&sh_bus_types[bus]);
		device_unregister(&sh_bus_devices[bus]);
		bus++;
	}
}
/* Module glue and the symbols exported to bus client drivers. */
module_init(sh_bus_init);
module_exit(sh_bus_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("SH Virtual Bus");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(sh_bus_types);
EXPORT_SYMBOL(sh_device_register);
EXPORT_SYMBOL(sh_device_unregister);
EXPORT_SYMBOL(sh_driver_register);
EXPORT_SYMBOL(sh_driver_unregister);

222
arch/sh/kernel/cpu/init.c Normal file
View File

@@ -0,0 +1,222 @@
/*
* arch/sh/kernel/cpu/init.c
*
* CPU init code
*
* Copyright (C) 2002, 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/io.h>
extern void detect_cpu_and_cache_system(void);
/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 */
/*
 * onchip_setup(x) expands to a "x_disabled" flag plus a __setup
 * handler for the boot option "nox" that sets the flag.
 * NOTE(review): the handler returns 0 -- check whether this kernel's
 * __setup convention treats that as "option not consumed".
 */
#define onchip_setup(x) \
static int x##_disabled __initdata = 0; \
 \
static int __init x##_setup(char *opts) \
{ \
 x##_disabled = 1; \
 return 0; \
} \
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);	/* "nofpu" boot option */
onchip_setup(dsp);	/* "nodsp" boot option */
/*
 * Generic first-level cache init
 *
 * Flushes the operand cache if it is already enabled (so no dirty
 * lines survive the re-configuration), then writes a fresh CCR value
 * enabling + invalidating the caches with the configured write policy.
 */
static void __init cache_init(void)
{
	unsigned long ccr, flags;

	if (cpu_data->type == CPU_SH_NONE)
		panic("Unknown CPU");

	/* CCR is manipulated from the uncached P2 segment. */
	jump_to_P2();
	ccr = ctrl_inl(CCR);

	/*
	 * If the cache is already enabled .. flush it.
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = cpu_data->dcache.sets;

		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;

		waysize <<= cpu_data->dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = cpu_data->dcache.ways;

		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			/* Clear each address-array entry, line by line. */
			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += cpu_data->dcache.linesz)
				ctrl_outl(0, addr);

			addrstart += cpu_data->dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (cpu_data->dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
#endif

#ifdef CONFIG_SH_WRITETHROUGH
	/* Turn on Write-through caching */
	flags |= CCR_CACHE_WT;
#else
	/* .. or default to Write-back */
	flags |= CCR_CACHE_CB;
#endif

#ifdef CONFIG_SH_OCRAM
	/* Turn on OCRAM -- halve the OC */
	flags |= CCR_CACHE_ORA;
	cpu_data->dcache.sets >>= 1;
#endif

	ctrl_outl(flags, CCR);
	back_to_P1();
}
#ifdef CONFIG_SH_DSP
/* Drop DSP mode by clearing the SR.DSP bit via a read-modify-write of SR. */
static void __init release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}
/*
 * Probe for a DSP unit: try to set SR.DSP and read SR back.  On parts
 * without a DSP the bit does not stick, so a set bit afterwards means
 * the unit exists; CPU_HAS_DSP is recorded accordingly and the bit is
 * cleared again either way.
 */
static void __init dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		cpu_data->flags |= CPU_HAS_DSP;

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#endif /* CONFIG_SH_DSP */
/**
* sh_cpu_init
*
* This is our initial entry point for each CPU, and is invoked on the boot
* CPU prior to calling start_kernel(). For SMP, a combination of this and
* start_secondary() will bring up each processor to a ready state prior
* to hand forking the idle loop.
*
* We do all of the basic processor init here, including setting up the
* caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
* hit (and subsequently platform_setup()) things like determining the
* CPU subtype and initial configuration will all be done.
*
* Each processor family is still responsible for doing its own probing
* and cache configuration in detect_cpu_and_cache_system().
*/
asmlinkage void __init sh_cpu_init(void)
{
	/* First, probe the CPU */
	detect_cpu_and_cache_system();

	/* Init the cache */
	cache_init();

	/* Disable the FPU when "nofpu" was given on the command line. */
	if (fpu_disabled) {
		printk("FPU Disabled\n");
		cpu_data->flags &= ~CPU_HAS_FPU;
		disable_fpu();
	}

	/* FPU initialization: start with clean FPU state for this thread. */
	if ((cpu_data->flags & CPU_HAS_FPU)) {
		clear_thread_flag(TIF_USEDFPU);
		clear_used_math();
	}

#ifdef CONFIG_SH_DSP
	/* Probe for DSP */
	dsp_init();

	/* Disable the DSP when "nodsp" was given on the command line. */
	if (dsp_disabled) {
		printk("DSP Disabled\n");
		cpu_data->flags &= ~CPU_HAS_DSP;
		release_dsp();
	}
#endif

#ifdef CONFIG_UBC_WAKEUP
	/*
	 * Some brain-damaged loaders decided it would be a good idea to put
	 * the UBC to sleep. This causes some issues when it comes to things
	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
	 * we wake it up and hope that all is well.
	 */
	ubc_wakeup();
#endif
}

View File

@@ -0,0 +1,116 @@
/* $Id: irq_imask.c,v 1.1.2.1 2002/11/17 10:53:43 mrbrown Exp $
*
* linux/arch/sh/kernel/irq_imask.c
*
* Copyright (C) 1999, 2000 Niibe Yutaka
*
* Simple interrupt handling using IMASK of SR register.
*
*/
/* NOTE: Will not work on level 15 */
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/irq.h>
/* Bitmap of IRQ masked */
static unsigned long imask_mask = 0x7fff;
/* Highest priority currently being blocked by the SR.IMASK field. */
static int interrupt_priority = 0;

static void enable_imask_irq(unsigned int irq);
static void disable_imask_irq(unsigned int irq);
static void shutdown_imask_irq(unsigned int irq);
static void mask_and_ack_imask(unsigned int);
static void end_imask_irq(unsigned int irq);

/* Maximum IMASK level; irq N maps to priority IMASK_PRIORITY - N. */
#define IMASK_PRIORITY	15
/* Startup hook: IMASK irqs need no setup and never have pending state. */
static unsigned int startup_imask_irq(unsigned int irq)
{
	/* Nothing to do */
	return 0; /* never anything pending */
}
/*
 * hw_interrupt_type for irqs handled purely through the SR.IMASK
 * field; positional initializers match the handlers declared above.
 */
static struct hw_interrupt_type imask_irq_type = {
	"SR.IMASK",
	startup_imask_irq,
	shutdown_imask_irq,
	enable_imask_irq,
	disable_imask_irq,
	mask_and_ack_imask,
	end_imask_irq
};
/*
 * set_interrupt_registers - load interrupt priority @ip into SR.IMASK.
 *
 * The asm first checks whether interrupts are globally blocked
 * (SR.IMASK == 0xf) and, if so, leaves SR untouched; otherwise it
 * rewrites the IMASK nibble with @ip.  The value is also stashed in
 * r6_bank.  Fix: specifier order was "void static inline", which the
 * C standard deems obsolescent -- storage-class specifiers come first.
 */
static inline void set_interrupt_registers(int ip)
{
	unsigned long __dummy;

	asm volatile("ldc %2, r6_bank\n\t"
		     "stc sr, %0\n\t"
		     "and #0xf0, %0\n\t"
		     "shlr2 %0\n\t"
		     "cmp/eq #0x3c, %0\n\t"
		     "bt/s 1f ! CLI-ed\n\t"
		     " stc sr, %0\n\t"
		     "and %1, %0\n\t"
		     "or %2, %0\n\t"
		     "ldc %0, sr\n"
		     "1:"
		     : "=&z" (__dummy)
		     : "r" (~0xf0), "r" (ip << 4)
		     : "t");
}
/* Mask @irq: drop it from the bitmap and raise IMASK if needed. */
static void disable_imask_irq(unsigned int irq)
{
	int level = IMASK_PRIORITY - irq;

	clear_bit(irq, &imask_mask);
	if (interrupt_priority < level)
		interrupt_priority = level;

	set_interrupt_registers(interrupt_priority);
}
/* Unmask @irq and recompute IMASK from the lowest still-masked irq. */
static void enable_imask_irq(unsigned int irq)
{
	set_bit(irq, &imask_mask);

	interrupt_priority = IMASK_PRIORITY - ffz(imask_mask);
	set_interrupt_registers(interrupt_priority);
}
/* Ack hook: there is no ack register, masking is all that is needed. */
static void mask_and_ack_imask(unsigned int irq)
{
	disable_imask_irq(irq);
}
/* End-of-interrupt: re-enable unless the irq was disabled or is re-entered. */
static void end_imask_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		enable_imask_irq(irq);
}
/* Shutdown hook: nothing to release for an IMASK-driven irq. */
static void shutdown_imask_irq(unsigned int irq)
{
	/* Nothing to do */
}
/* Attach the IMASK handler type to @irq, briefly quiescing it meanwhile. */
void make_imask_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	irq_desc[irq].handler = &imask_irq_type;
	enable_irq(irq);
}

View File

@@ -0,0 +1,339 @@
/* $Id: irq_ipr.c,v 1.1.2.1 2002/11/17 10:53:43 mrbrown Exp $
*
* linux/arch/sh/kernel/irq_ipr.c
*
* Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
* Copyright (C) 2000 Kazumoto Kojima
* Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
*
* Interrupt handling for IPR-based IRQ.
*
* Supported system:
* On-chip supporting modules (TMU, RTC, etc.).
* On-chip supporting modules for SH7709/SH7709A/SH7729/SH7300.
* Hitachi SolutionEngine external I/O:
* MS7709SE01, MS7709ASE01, and MS7750SE01
*
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/machvec.h>
struct ipr_data {
	unsigned int addr;	/* Address of Interrupt Priority Register */
	int shift;		/* Shifts of the 16-bit data */
	int priority;		/* The priority */
};

/* Per-irq IPR location/priority, filled in by make_ipr_irq(). */
static struct ipr_data ipr_data[NR_IRQS];

static void enable_ipr_irq(unsigned int irq);
static void disable_ipr_irq(unsigned int irq);

/* shutdown is same as "disable" */
#define shutdown_ipr_irq disable_ipr_irq

static void mask_and_ack_ipr(unsigned int);
static void end_ipr_irq(unsigned int irq);
/* Startup hook: enabling the priority is all an IPR irq needs. */
static unsigned int startup_ipr_irq(unsigned int irq)
{
	enable_ipr_irq(irq);
	return 0; /* never anything pending */
}
/*
 * hw_interrupt_type for IPR-based irqs; positional initializers match
 * the handlers declared above.
 */
static struct hw_interrupt_type ipr_irq_type = {
	"IPR-IRQ",
	startup_ipr_irq,
	shutdown_ipr_irq,
	enable_ipr_irq,
	disable_ipr_irq,
	mask_and_ack_ipr,
	end_ipr_irq
};
/* Mask @irq by zeroing its 4-bit priority field in the IPR register. */
static void disable_ipr_irq(unsigned int irq)
{
	unsigned int addr = ipr_data[irq].addr;
	unsigned short mask = 0xffff ^ (0x0f << ipr_data[irq].shift);
	unsigned long flags;
	unsigned long val;

	/* Set the priority in IPR to 0 */
	local_irq_save(flags);
	val = ctrl_inw(addr) & mask;
	ctrl_outw(val, addr);
	local_irq_restore(flags);
}
static void enable_ipr_irq(unsigned int irq)
{
unsigned long val, flags;
unsigned int addr = ipr_data[irq].addr;
int priority = ipr_data[irq].priority;
unsigned short value = (priority << ipr_data[irq].shift);
/* Set priority in IPR back to original value */
local_irq_save(flags);
val = ctrl_inw(addr);
val |= value;
ctrl_outw(val, addr);
local_irq_restore(flags);
}
/*
 * Ack hook: mask the irq; on the SH770x/SH7300 parts additionally
 * clear the latched external request bit in INTC_IRR0 for IRQ0-IRQ5.
 */
static void mask_and_ack_ipr(unsigned int irq)
{
	disable_ipr_irq(irq);

#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
    defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
	/* This is needed when we use edge triggered setting */
	/* XXX: Is it really needed? */
	if (IRQ0_IRQ <= irq && irq <= IRQ5_IRQ) {
		/* Clear external interrupt request */
		int a = ctrl_inb(INTC_IRR0);
		a &= ~(1 << (irq - IRQ0_IRQ));
		ctrl_outb(a, INTC_IRR0);
	}
#endif
}
/* End-of-interrupt: re-enable unless the irq was disabled or is re-entered. */
static void end_ipr_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		enable_ipr_irq(irq);
}
/*
 * make_ipr_irq - describe @irq's priority field (register @addr,
 * nibble position @pos, priority @priority) and attach the IPR
 * handler type.  The irq is left masked.
 */
void make_ipr_irq(unsigned int irq, unsigned int addr, int pos, int priority)
{
	disable_irq_nosync(irq);
	ipr_data[irq].addr = addr;
	ipr_data[irq].shift = pos*4; /* POSition (0-3) x 4 means shift */
	ipr_data[irq].priority = priority;

	irq_desc[irq].handler = &ipr_irq_type;
	disable_ipr_irq(irq);
}
#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
/* Maps an 8-bit PINT pending pattern to its lowest set source index. */
static unsigned char pint_map[256];
/* Pin-control bits of the currently-enabled PINT sources (2 bits each). */
static unsigned long portcr_mask = 0;

static void enable_pint_irq(unsigned int irq);
static void disable_pint_irq(unsigned int irq);

/* shutdown is same as "disable" */
#define shutdown_pint_irq disable_pint_irq

static void mask_and_ack_pint(unsigned int);
static void end_pint_irq(unsigned int irq);
/* Startup hook: enabling the PINT enable bit is all that is needed. */
static unsigned int startup_pint_irq(unsigned int irq)
{
	enable_pint_irq(irq);
	return 0; /* never anything pending */
}
/*
 * hw_interrupt_type for the demuxed PINT sources; positional
 * initializers match the handlers declared above.
 */
static struct hw_interrupt_type pint_irq_type = {
	"PINT-IRQ",
	startup_pint_irq,
	shutdown_pint_irq,
	enable_pint_irq,
	disable_pint_irq,
	mask_and_ack_pint,
	end_pint_irq
};
/*
 * disable_pint_irq - clear @irq's enable bit in INTC_INTER and drop
 * its two pin-control bits from portcr_mask.
 *
 * Fix: the mask literal was "3 << ...", a signed-int shift that
 * overflows (undefined behavior) for the upper PINT sources, where
 * (irq - PINT_IRQ_BASE)*2 reaches 30.  Use an unsigned long literal
 * to match portcr_mask's type.
 */
static void disable_pint_irq(unsigned int irq)
{
	unsigned long val, flags;

	local_irq_save(flags);
	val = ctrl_inw(INTC_INTER);
	val &= ~(1 << (irq - PINT_IRQ_BASE));
	ctrl_outw(val, INTC_INTER);	/* disable PINTn */
	portcr_mask &= ~(3UL << (irq - PINT_IRQ_BASE)*2);
	local_irq_restore(flags);
}
/*
 * enable_pint_irq - set @irq's enable bit in INTC_INTER and record
 * its two pin-control bits in portcr_mask.
 *
 * Fix: the mask literal was "3 << ...", a signed-int shift that
 * overflows (undefined behavior) for the upper PINT sources, where
 * (irq - PINT_IRQ_BASE)*2 reaches 30.  Use an unsigned long literal
 * to match portcr_mask's type.
 */
static void enable_pint_irq(unsigned int irq)
{
	unsigned long val, flags;

	local_irq_save(flags);
	val = ctrl_inw(INTC_INTER);
	val |= 1 << (irq - PINT_IRQ_BASE);
	ctrl_outw(val, INTC_INTER);	/* enable PINTn */
	portcr_mask |= 3UL << (irq - PINT_IRQ_BASE)*2;
	local_irq_restore(flags);
}
/* Ack hook: there is no ack register, masking is all that is needed. */
static void mask_and_ack_pint(unsigned int irq)
{
	disable_pint_irq(irq);
}
/* End-of-interrupt: re-enable unless the irq was disabled or is re-entered. */
static void end_pint_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		enable_pint_irq(irq);
}
/* Attach the PINT handler type to @irq; the irq is left masked. */
void make_pint_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	irq_desc[irq].handler = &pint_irq_type;
	disable_pint_irq(irq);
}
#endif
/*
 * init_IRQ - wire up every on-chip IPR interrupt source for the
 * configured CPU subtype, set up the PINT demux table where present,
 * then hand off to the machine-vector init hook.
 */
void __init init_IRQ(void)
{
#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
    defined(CONFIG_CPU_SUBTYPE_SH7707) || \
    defined(CONFIG_CPU_SUBTYPE_SH7709)
	int i;
#endif

	make_ipr_irq(TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY);
	make_ipr_irq(TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY);
#if defined(CONFIG_SH_RTC)
	make_ipr_irq(RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY);
#endif

#ifdef SCI_ERI_IRQ
	make_ipr_irq(SCI_ERI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY);
	make_ipr_irq(SCI_RXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY);
	make_ipr_irq(SCI_TXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY);
#endif

#ifdef SCIF1_ERI_IRQ
	make_ipr_irq(SCIF1_ERI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
	make_ipr_irq(SCIF1_RXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
	make_ipr_irq(SCIF1_BRI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
	make_ipr_irq(SCIF1_TXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
#endif

#if defined(CONFIG_CPU_SUBTYPE_SH7300)
	make_ipr_irq(SCIF0_IRQ, SCIF0_IPR_ADDR, SCIF0_IPR_POS, SCIF0_PRIORITY);
	make_ipr_irq(DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY);
	make_ipr_irq(DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY);
	make_ipr_irq(VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY);
#endif

#ifdef SCIF_ERI_IRQ
	make_ipr_irq(SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
	make_ipr_irq(SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
	make_ipr_irq(SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
	make_ipr_irq(SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
#endif

#ifdef IRDA_ERI_IRQ
	make_ipr_irq(IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
	make_ipr_irq(IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
	make_ipr_irq(IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
	make_ipr_irq(IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
#endif

#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
    defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
	/*
	 * Initialize the Interrupt Controller (INTC)
	 * registers to their power on values
	 */

	/*
	 * Enable external irq (INTC IRQ mode).
	 * You should set corresponding bits of PFC to "00"
	 * to enable these interrupts.
	 */
	make_ipr_irq(IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, IRQ0_PRIORITY);
	make_ipr_irq(IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, IRQ1_PRIORITY);
	make_ipr_irq(IRQ2_IRQ, IRQ2_IPR_ADDR, IRQ2_IPR_POS, IRQ2_PRIORITY);
	make_ipr_irq(IRQ3_IRQ, IRQ3_IPR_ADDR, IRQ3_IPR_POS, IRQ3_PRIORITY);
	make_ipr_irq(IRQ4_IRQ, IRQ4_IPR_ADDR, IRQ4_IPR_POS, IRQ4_PRIORITY);
	make_ipr_irq(IRQ5_IRQ, IRQ5_IPR_ADDR, IRQ5_IPR_POS, IRQ5_PRIORITY);

#if !defined(CONFIG_CPU_SUBTYPE_SH7300)
	make_ipr_irq(PINT0_IRQ, PINT0_IPR_ADDR, PINT0_IPR_POS, PINT0_PRIORITY);
	make_ipr_irq(PINT8_IRQ, PINT8_IPR_ADDR, PINT8_IPR_POS, PINT8_PRIORITY);
	enable_ipr_irq(PINT0_IRQ);
	enable_ipr_irq(PINT8_IRQ);

	for(i = 0; i < 16; i++)
		make_pint_irq(PINT_IRQ_BASE + i);

	/* pint_map[pattern] = index of the lowest bit set in pattern. */
	for(i = 0; i < 256; i++)
	{
		if(i & 1) pint_map[i] = 0;
		else if(i & 2) pint_map[i] = 1;
		else if(i & 4) pint_map[i] = 2;
		else if(i & 8) pint_map[i] = 3;
		else if(i & 0x10) pint_map[i] = 4;
		else if(i & 0x20) pint_map[i] = 5;
		else if(i & 0x40) pint_map[i] = 6;
		else if(i & 0x80) pint_map[i] = 7;
	}
#endif /* !CONFIG_CPU_SUBTYPE_SH7300 */
#endif /* CONFIG_CPU_SUBTYPE_SH7707 || CONFIG_CPU_SUBTYPE_SH7709 || CONFIG_CPU_SUBTYPE_SH7300*/

#ifdef CONFIG_CPU_SUBTYPE_ST40
	init_IRQ_intc2();
#endif

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_irq != NULL) {
		sh_mv.mv_init_irq();
	}
}
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
    defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
/*
 * ipr_irq_demux - resolve a shared PINT0/PINT8 parent irq to the
 * individual PINTn source that is pending; any other irq is returned
 * unchanged.
 */
int ipr_irq_demux(int irq)
{
#if !defined(CONFIG_CPU_SUBTYPE_SH7300)
	unsigned long creg, dreg, d, sav;

	if(irq == PINT0_IRQ)
	{
#if defined(CONFIG_CPU_SUBTYPE_SH7707)
		creg = PORT_PACR;
		dreg = PORT_PADR;
#else
		creg = PORT_PCCR;
		dreg = PORT_PCDR;
#endif
		/*
		 * Temporarily OR portcr_mask into the port control register
		 * while sampling the data register -- presumably to make the
		 * enabled PINT pins readable; verify against the hardware
		 * manual.
		 */
		sav = ctrl_inw(creg);
		ctrl_outw(sav | portcr_mask, creg);
		d = (~ctrl_inb(dreg) ^ ctrl_inw(INTC_ICR2)) & ctrl_inw(INTC_INTER) & 0xff;
		ctrl_outw(sav, creg);
		if(d == 0) return irq;	/* no pending source found */
		return PINT_IRQ_BASE + pint_map[d];
	}
	else if(irq == PINT8_IRQ)
	{
#if defined(CONFIG_CPU_SUBTYPE_SH7707)
		creg = PORT_PBCR;
		dreg = PORT_PBDR;
#else
		creg = PORT_PFCR;
		dreg = PORT_PFDR;
#endif
		sav = ctrl_inw(creg);
		ctrl_outw(sav | (portcr_mask >> 16), creg);
		d = (~ctrl_inb(dreg) ^ (ctrl_inw(INTC_ICR2) >> 8)) & (ctrl_inw(INTC_INTER) >> 8) & 0xff;
		ctrl_outw(sav, creg);
		if(d == 0) return irq;	/* no pending source found */
		return PINT_IRQ_BASE + 8 + pint_map[d];
	}
#endif
	return irq;
}
#endif
EXPORT_SYMBOL(make_ipr_irq);

136
arch/sh/kernel/cpu/rtc.c Normal file
View File

@@ -0,0 +1,136 @@
/*
* linux/arch/sh/kernel/rtc.c -- SH3 / SH4 on-chip RTC support
*
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <asm/io.h>
#include <asm/rtc.h>
/*
 * In-place BCD <-> binary conversion of a single two-digit value.
 * Note these evaluate their argument multiple times -- only pass
 * plain lvalues.
 */
#ifndef BCD_TO_BIN
#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
#endif

#ifndef BIN_TO_BCD
#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
#endif
/*
 * sh_rtc_gettimeofday - read the on-chip RTC into @ts.
 *
 * The counter registers are sampled with interrupts off and re-read
 * until no carry occurred mid-read (RCR1 carry flag clear and the
 * 64Hz counter consistent).  Garbage register contents reset the RTC
 * to 1 Jan 2000 and the read is retried.
 */
void sh_rtc_gettimeofday(struct timespec *ts)
{
	unsigned int sec128, sec, sec2, min, hr, wk, day, mon, yr, yr100, cf_bit;
	unsigned long flags;

 again:
	do {
		local_irq_save(flags);
		ctrl_outb(0, RCR1);  /* Clear CF-bit */
		sec128 = ctrl_inb(R64CNT);
		sec = ctrl_inb(RSECCNT);
		min = ctrl_inb(RMINCNT);
		hr  = ctrl_inb(RHRCNT);
		wk  = ctrl_inb(RWKCNT);
		day = ctrl_inb(RDAYCNT);
		mon = ctrl_inb(RMONCNT);
#if defined(CONFIG_CPU_SH4)
		/* SH-4 keeps a 4-digit BCD year in a 16-bit register. */
		yr  = ctrl_inw(RYRCNT);
		yr100 = (yr >> 8);
		yr &= 0xff;
#else
		/* SH-3 has only a 2-digit year; infer the century. */
		yr  = ctrl_inb(RYRCNT);
		yr100 = (yr == 0x99) ? 0x19 : 0x20;
#endif
		sec2 = ctrl_inb(R64CNT);
		cf_bit = ctrl_inb(RCR1) & RCR1_CF;
		local_irq_restore(flags);
	} while (cf_bit != 0 || ((sec128 ^ sec2) & RTC_BIT_INVERTED) != 0);

	BCD_TO_BIN(yr100);
	BCD_TO_BIN(yr);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(day);
	BCD_TO_BIN(hr);
	BCD_TO_BIN(min);
	BCD_TO_BIN(sec);

	if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
	    hr > 23 || min > 59 || sec > 59) {
		printk(KERN_ERR
		       "SH RTC: invalid value, resetting to 1 Jan 2000\n");
		local_irq_save(flags);
		ctrl_outb(RCR2_RESET, RCR2);  /* Reset & Stop */
		ctrl_outb(0, RSECCNT);
		ctrl_outb(0, RMINCNT);
		ctrl_outb(0, RHRCNT);
		ctrl_outb(6, RWKCNT);	/* 1 Jan 2000 was a Saturday */
		ctrl_outb(1, RDAYCNT);
		ctrl_outb(1, RMONCNT);
#if defined(CONFIG_CPU_SH4)
		ctrl_outw(0x2000, RYRCNT);
#else
		ctrl_outb(0, RYRCNT);
#endif
		ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start */
		goto again;
	}

#if RTC_BIT_INVERTED != 0
	if ((sec128 & RTC_BIT_INVERTED))
		sec--;
#endif

	ts->tv_sec = mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
	/* Sub-second time comes from the 128Hz (R64CNT) counter. */
	ts->tv_nsec = ((sec128 * 1000000) / 128) * 1000;
}
/*
 * Changed to only care about tv_sec, and not the full timespec struct
 * (i.e. tv_nsec).  It can easily be switched to timespec for future cpus
 * that support setting usec or nsec RTC values.
 */
/*
 * Writes only the seconds and minutes counters (half-hour-zone
 * corrected) and refuses adjustments of 30 minutes or more, leaving
 * the hour/date registers alone.  Returns 0 on success, -1 if the
 * delta was too large.
 */
int sh_rtc_settimeofday(const time_t secs)
{
	int retval = 0;
	int real_seconds, real_minutes, cmos_minutes;
	unsigned long flags;

	local_irq_save(flags);
	ctrl_outb(RCR2_RESET, RCR2);  /* Reset pre-scaler & stop RTC */

	cmos_minutes = ctrl_inb(RMINCNT);
	BCD_TO_BIN(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
	real_seconds = secs % 60;
	real_minutes = secs / 60;
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		ctrl_outb(real_seconds, RSECCNT);
		ctrl_outb(real_minutes, RMINCNT);
	} else {
		printk(KERN_WARNING
		       "set_rtc_time: can't update from %d to %d\n",
		       cmos_minutes, real_minutes);
		retval = -1;
	}

	ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start RTC */
	local_irq_restore(flags);

	return retval;
}

View File

@@ -0,0 +1,6 @@
#
# Makefile for the Linux/SuperH SH-2 backends.
#
# CPU subtype probing only, for now.
obj-y := probe.o

View File

@@ -0,0 +1,39 @@
/*
* arch/sh/kernel/cpu/sh2/probe.c
*
* CPU Subtype Probing for SH-2.
*
* Copyright (C) 2002 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/cache.h>
/*
 * Describe the CPU and cache geometry.  No real probing is done yet:
 * everything is hardwired for an SH7604.
 */
int __init detect_cpu_and_cache_system(void)
{
	cpu_data->type = CPU_SH7604;

	/* Fixed SH7604 operand-cache geometry. */
	cpu_data->dcache.ways		= 4;
	cpu_data->dcache.way_shift	= 6;
	cpu_data->dcache.sets		= 64;
	cpu_data->dcache.entry_shift	= 4;
	cpu_data->dcache.linesz		= L1_CACHE_BYTES;

	/*
	 * SH-2 doesn't have separate caches; flag the dcache as combined
	 * and mirror it into the icache descriptor.
	 */
	cpu_data->dcache.flags	= SH_CACHE_COMBINED;
	cpu_data->icache	= cpu_data->dcache;

	return 0;
}

View File

@@ -0,0 +1,6 @@
#
# Makefile for the Linux/SuperH SH-3 backends.
#
# Exception vector table and CPU subtype probing.
obj-y := ex.o probe.o

199
arch/sh/kernel/cpu/sh3/ex.S Normal file
View File

@@ -0,0 +1,199 @@
/*
* arch/sh/kernel/cpu/sh3/ex.S
*
* The SH-3 exception vector table.
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
#include <linux/linkage.h>
#include <linux/config.h>
	! General-exception vector table: one handler pointer per 0x20
	! exception code.  Without an MMU the TLB/address-error slots fall
	! back to exception_error.
	.align 2
	.data

ENTRY(exception_handling_table)
	.long	exception_error		/* 000 */
	.long	exception_error
#if defined(CONFIG_MMU)
	.long	tlb_miss_load		/* 040 */
	.long	tlb_miss_store
	.long	initial_page_write
	.long	tlb_protection_violation_load
	.long	tlb_protection_violation_store
	.long	address_error_load
	.long	address_error_store	/* 100 */
#else
	.long	exception_error	! tlb miss load		/* 040 */
	.long	exception_error	! tlb miss store
	.long	exception_error	! initial page write
	.long	exception_error	! tlb prot violation load
	.long	exception_error	! tlb prot violation store
	.long	exception_error	! address error load
	.long	exception_error	! address error store	/* 100 */
#endif
	.long	exception_error	! fpu_exception	/* 120 */
	.long	exception_error			/* 140 */
	.long	system_call	! Unconditional Trap	 /* 160 */
	.long	exception_error	! reserved_instruction (filled by trap_init) /* 180 */
	.long	exception_error	! illegal_slot_instruction (filled by trap_init) /*1A0*/
! NMI and user-breakpoint vectors follow the general-exception table.
ENTRY(nmi_slot)
#if defined (CONFIG_KGDB_NMI)
	.long	debug_enter	/* 1C0 */	! Allow trap to debugger
#else
	.long	exception_none	/* 1C0 */	! Not implemented yet
#endif
ENTRY(user_break_point_trap)
	.long	break_point_trap	/* 1E0 */
! Interrupt vector table, starting at exception code 0x200.  Almost
! every source funnels into do_IRQ; the trailing CPU-subtype sections
! extend the table for the additional on-chip sources of each part.
ENTRY(interrupt_table)
! external hardware
	.long	do_IRQ	! 0000		/* 200 */
	.long	do_IRQ	! 0001
	.long	do_IRQ	! 0010
	.long	do_IRQ	! 0011
	.long	do_IRQ	! 0100
	.long	do_IRQ	! 0101
	.long	do_IRQ	! 0110
	.long	do_IRQ	! 0111
	.long	do_IRQ	! 1000		/* 300 */
	.long	do_IRQ	! 1001
	.long	do_IRQ	! 1010
	.long	do_IRQ	! 1011
	.long	do_IRQ	! 1100
	.long	do_IRQ	! 1101
	.long	do_IRQ	! 1110
	.long	exception_error
! Internal hardware
	.long	do_IRQ	! TMU0 tuni0	/* 400 */
	.long	do_IRQ	! TMU1 tuni1
	.long	do_IRQ	! TMU2 tuni2
	.long	do_IRQ	!      ticpi2
	.long	do_IRQ	! RTC  ati
	.long	do_IRQ	!      pri
	.long	do_IRQ	!      cui
	.long	do_IRQ	! SCI  eri
	.long	do_IRQ	!      rxi	/* 500 */
	.long	do_IRQ	!      txi
	.long	do_IRQ	!      tei
	.long	do_IRQ	! WDT  iti	/* 560 */
	.long	do_IRQ	! REF  rcmi
	.long	do_IRQ	!      rovi
	.long	do_IRQ
	.long	do_IRQ			/* 5E0 */
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
    defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
	.long	do_IRQ	! 32 IRQ  irq0	/* 600 */
	.long	do_IRQ	! 33      irq1
	.long	do_IRQ	! 34      irq2
	.long	do_IRQ	! 35      irq3
	.long	do_IRQ	! 36      irq4
	.long	do_IRQ	! 37      irq5
	.long	do_IRQ	! 38
	.long	do_IRQ	! 39
	.long	do_IRQ	! 40 PINT pint0-7	/* 700 */
	.long	do_IRQ	! 41      pint8-15
	.long	do_IRQ	! 42
	.long	do_IRQ	! 43
	.long	do_IRQ	! 44
	.long	do_IRQ	! 45
	.long	do_IRQ	! 46
	.long	do_IRQ	! 47
	.long	do_IRQ	! 48 DMAC dei0	/* 800 */
	.long	do_IRQ	! 49      dei1
	.long	do_IRQ	! 50      dei2
	.long	do_IRQ	! 51      dei3
	.long	do_IRQ	! 52 IrDA eri1
	.long	do_IRQ	! 53      rxi1
	.long	do_IRQ	! 54      bri1
	.long	do_IRQ	! 55      txi1
	.long	do_IRQ	! 56 SCIF eri2
	.long	do_IRQ	! 57      rxi2
	.long	do_IRQ	! 58      bri2
	.long	do_IRQ	! 59      txi2
	.long	do_IRQ	! 60 ADC  adi	/* 980 */
#if defined(CONFIG_CPU_SUBTYPE_SH7705)
	.long	exception_none	! 61	/* 9A0 */
	.long	exception_none	! 62
	.long	exception_none	! 63
	.long	exception_none	! 64	/* A00 */
	.long	do_IRQ	! 65 USB usi0
	.long	do_IRQ	! 66     usi1
	.long	exception_none	! 67
	.long	exception_none	! 68
	.long	exception_none	! 69
	.long	exception_none	! 70
	.long	exception_none	! 71
	.long	exception_none	! 72	/* B00 */
	.long	exception_none	! 73
	.long	exception_none	! 74
	.long	exception_none	! 75
	.long	exception_none	! 76
	.long	exception_none	! 77
	.long	exception_none	! 78
	.long	exception_none	! 79
	.long	do_IRQ	! 80 TPU0 tpi0	/* C00 */
	.long	do_IRQ	! 81 TPU1 tpi1
	.long	exception_none	! 82
	.long	exception_none	! 83
	.long	do_IRQ	! 84 TPU2 tpi2
	.long	do_IRQ	! 85 TPU3 tpi3	/* CA0 */
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7300)
	.long	do_IRQ	! 61 LCDC lcdi	/* 9A0 */
	.long	do_IRQ	! 62 PCC  pcc0i
	.long	do_IRQ	! 63      pcc1i	/* 9E0 */
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7300)
	.long	do_IRQ	! 64
	.long	do_IRQ	! 65
	.long	do_IRQ	! 66
	.long	do_IRQ	! 67
	.long	do_IRQ	! 68
	.long	do_IRQ	! 69
	.long	do_IRQ	! 70
	.long	do_IRQ	! 71
	.long	do_IRQ	! 72
	.long	do_IRQ	! 73
	.long	do_IRQ	! 74
	.long	do_IRQ	! 75
	.long	do_IRQ	! 76
	.long	do_IRQ	! 77
	.long	do_IRQ	! 78
	.long	do_IRQ	! 79
	.long	do_IRQ	! 80 SCIF0(SH7300)
	.long	do_IRQ	! 81
	.long	do_IRQ	! 82
	.long	do_IRQ	! 83
	.long	do_IRQ	! 84
	.long	do_IRQ	! 85
	.long	do_IRQ	! 86
	.long	do_IRQ	! 87
	.long	do_IRQ	! 88
	.long	do_IRQ	! 89
	.long	do_IRQ	! 90
	.long	do_IRQ	! 91
	.long	do_IRQ	! 92
	.long	do_IRQ	! 93
	.long	do_IRQ	! 94
	.long	do_IRQ	! 95
	.long	do_IRQ	! 96
	.long	do_IRQ	! 97
	.long	do_IRQ	! 98
	.long	do_IRQ	! 99
	.long	do_IRQ	! 100
	.long	do_IRQ	! 101
	.long	do_IRQ	! 102
	.long	do_IRQ	! 103
	.long	do_IRQ	! 104
	.long	do_IRQ	! 105
	.long	do_IRQ	! 106
	.long	do_IRQ	! 107
	.long	do_IRQ	! 108
#endif
#endif

View File

@@ -0,0 +1,97 @@
/*
* arch/sh/kernel/cpu/sh3/probe.c
*
* CPU Subtype Probing for SH-3.
*
* Copyright (C) 1999, 2000 Niibe Yutaka
* Copyright (C) 2002 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
/*
 * Probe the SH-3 CPU subtype by examining cache-array aliasing behaviour,
 * and fill in cpu_data's cache geometry and type accordingly.
 *
 * Returns 0 unconditionally.
 */
int __init detect_cpu_and_cache_system(void)
{
	unsigned long addr0, addr1, data0, data1, data2, data3;

	/* Cache address-array accesses must run uncached, from the P2 area. */
	jump_to_P2();
	/*
	 * Check if the entry shadows or not.
	 * When shadowed, it's 128-entry system.
	 * Otherwise, it's 256-entry system.
	 */
	addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
	addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);

	/* First, write back & invalidate */
	data0 = ctrl_inl(addr0);
	ctrl_outl(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
	data1 = ctrl_inl(addr1);
	ctrl_outl(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);

	/*
	 * Next, check if there's shadow or not: toggle the VALID bit of the
	 * entry at 3<<12 and read back the entry at 1<<12 (and vice versa).
	 * If both pairs read back the same, the two addresses alias onto one
	 * entry (128-entry system).
	 */
	data0 = ctrl_inl(addr0);
	data0 ^= SH_CACHE_VALID;
	ctrl_outl(data0, addr0);
	data1 = ctrl_inl(addr1);
	data2 = data1 ^ SH_CACHE_VALID;
	ctrl_outl(data2, addr1);
	data3 = ctrl_inl(addr0);

	/* Lastly, invalidate them. */
	ctrl_outl(data0&~SH_CACHE_VALID, addr0);
	ctrl_outl(data2&~SH_CACHE_VALID, addr1);

	back_to_P1();

	/* Common SH-3 dcache defaults; way-specific fields are set below. */
	cpu_data->dcache.ways = 4;
	cpu_data->dcache.entry_shift = 4;
	cpu_data->dcache.linesz = L1_CACHE_BYTES;
	cpu_data->dcache.flags = 0;

	/*
	 * 7709A/7729 has 16K cache (256-entry), while 7702 has only
	 * 2K(direct) 7702 is not supported (yet)
	 */
	if (data0 == data1 && data2 == data3) {	/* Shadow */
		cpu_data->dcache.way_incr = (1 << 11);
		cpu_data->dcache.entry_mask = 0x7f0;
		cpu_data->dcache.sets = 128;
		cpu_data->type = CPU_SH7708;

		cpu_data->flags |= CPU_HAS_MMU_PAGE_ASSOC;
	} else {	/* 7709A or 7729 */
		cpu_data->dcache.way_incr = (1 << 12);
		cpu_data->dcache.entry_mask = 0xff0;
		cpu_data->dcache.sets = 256;
		cpu_data->type = CPU_SH7729;

#if defined(CONFIG_CPU_SUBTYPE_SH7705)
		/* Kconfig-selected subtype overrides the probe result. */
		cpu_data->type = CPU_SH7705;

#if defined(CONFIG_SH7705_CACHE_32KB)
		cpu_data->dcache.way_incr = (1 << 13);
		cpu_data->dcache.entry_mask = 0x1ff0;
		cpu_data->dcache.sets = 512;
		ctrl_outl(CCR_CACHE_32KB, CCR3);
#else
		ctrl_outl(CCR_CACHE_16KB, CCR3);
#endif
#endif
	}

	/*
	 * SH-3 doesn't have separate caches
	 */
	cpu_data->dcache.flags |= SH_CACHE_COMBINED;
	cpu_data->icache = cpu_data->dcache;

	return 0;
}

View File

@@ -0,0 +1,10 @@
#
# Makefile for the Linux/SuperH SH-4 backends.
#

# Always built: exception vector table and CPU/cache probing.
obj-y	:= ex.o probe.o

# Optional features, selected via Kconfig.
obj-$(CONFIG_SH_FPU)			+= fpu.o
obj-$(CONFIG_CPU_SUBTYPE_ST40STB1)	+= irq_intc2.o
obj-$(CONFIG_SH_STORE_QUEUES)		+= sq.o

384
arch/sh/kernel/cpu/sh4/ex.S Normal file
View File

@@ -0,0 +1,384 @@
/*
* arch/sh/kernel/cpu/sh4/ex.S
*
* The SH-4 exception vector table.
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
#include <linux/linkage.h>
#include <linux/config.h>
	.align 2
	.data

! One entry per hardware exception; the /* NNN */ annotations are the
! EXPEVT vector offsets. Entry order mirrors the hardware event codes —
! never reorder or insert entries here.
ENTRY(exception_handling_table)
	.long	exception_error		/* 000 */
	.long	exception_error
#if defined(CONFIG_MMU)
	.long	tlb_miss_load		/* 040 */
	.long	tlb_miss_store
	.long	initial_page_write
	.long	tlb_protection_violation_load
	.long	tlb_protection_violation_store
	.long	address_error_load
	.long	address_error_store	/* 100 */
#else
! Without an MMU the TLB/address-error slots are unreachable; keep the
! table shape identical and fault if one ever fires.
	.long	exception_error	! tlb miss load		/* 040 */
	.long	exception_error	! tlb miss store
	.long	exception_error	! initial page write
	.long	exception_error	! tlb prot violation load
	.long	exception_error	! tlb prot violation store
	.long	exception_error	! address error load
	.long	exception_error	! address error store	/* 100 */
#endif
#if defined(CONFIG_SH_FPU)
	.long	do_fpu_error		/* 120 */
#else
	.long	exception_error		/* 120 */
#endif
	.long	exception_error		/* 140 */
	.long	system_call	! Unconditional Trap	/* 160 */
	.long	exception_error	! reserved_instruction (filled by trap_init)	/* 180 */
	.long	exception_error	! illegal_slot_instruction (filled by trap_init)	/* 1A0 */
ENTRY(nmi_slot)
#if defined (CONFIG_KGDB_NMI)
	.long	debug_enter	/* 1C0 */	! Allow trap to debugger
#else
	.long	exception_none	/* 1C0 */	! Not implemented yet
#endif
ENTRY(user_break_point_trap)
	.long	break_point_trap	/* 1E0 */
! One entry per INTC interrupt source, indexed by INTEVT vector offset
! (the /* NNN */ and 0xNNN annotations). Slot position encodes the IRQ
! number, so entries must never be reordered, inserted or removed — only
! swapped between do_IRQ and exception_error.
ENTRY(interrupt_table)
! external hardware
	.long	do_IRQ	! 0000		/* 200 */
	.long	do_IRQ	! 0001
	.long	do_IRQ	! 0010
	.long	do_IRQ	! 0011
	.long	do_IRQ	! 0100
	.long	do_IRQ	! 0101
	.long	do_IRQ	! 0110
	.long	do_IRQ	! 0111
	.long	do_IRQ	! 1000		/* 300 */
	.long	do_IRQ	! 1001
	.long	do_IRQ	! 1010
	.long	do_IRQ	! 1011
	.long	do_IRQ	! 1100
	.long	do_IRQ	! 1101
	.long	do_IRQ	! 1110
	.long	exception_error
! Internal hardware
	.long	do_IRQ	! TMU0 tuni0	/* 400 */
	.long	do_IRQ	! TMU1 tuni1
	.long	do_IRQ	! TMU2 tuni2
	.long	do_IRQ	!      ticpi2
#if defined(CONFIG_CPU_SUBTYPE_SH7760)
! SH7760 has no on-chip RTC/SCI; those slots must fault.
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error	/* 500 */
	.long	exception_error
	.long	exception_error
#else
	.long	do_IRQ	! RTC  ati
	.long	do_IRQ	!      pri
	.long	do_IRQ	!      cui
	.long	do_IRQ	! SCI  eri
	.long	do_IRQ	!      rxi	/* 500 */
	.long	do_IRQ	!      txi
	.long	do_IRQ	!      tei
#endif
	.long	do_IRQ	! WDT  iti	/* 560 */
	.long	do_IRQ	! REF  rcmi
	.long	do_IRQ	!      rovi
	.long	do_IRQ
	.long	do_IRQ	/* 5E0 */
	.long	do_IRQ	! 32 Hitachi UDI	/* 600 */
	.long	do_IRQ	! 33 GPIO
	.long	do_IRQ	! 34 DMAC dmte0
	.long	do_IRQ	! 35      dmte1
	.long	do_IRQ	! 36      dmte2
	.long	do_IRQ	! 37      dmte3
	.long	do_IRQ	! 38      dmae
	.long	exception_error	! 39	/* 6E0 */
#if defined(CONFIG_CPU_SUBTYPE_SH7760)
	.long	exception_error		/* 700 */
	.long	exception_error
	.long	exception_error
	.long	exception_error		/* 760 */
#else
	.long	do_IRQ	! 40 SCIF eri	/* 700 */
	.long	do_IRQ	! 41      rxi
	.long	do_IRQ	! 42      bri
	.long	do_IRQ	! 43      txi
#endif
! Note: an undefined CONFIG_NR_ONCHIP_DMA_CHANNELS evaluates to 0 here,
! so the exception_error branch is taken by default.
#if CONFIG_NR_ONCHIP_DMA_CHANNELS == 8
	.long	do_IRQ	! 44 DMAC dmte4	/* 780 */
	.long	do_IRQ	! 45      dmte5
	.long	do_IRQ	! 46      dmte6
	.long	do_IRQ	! 47      dmte7	/* 7E0 */
#else
	.long	exception_error	! 44	/* 780 */
	.long	exception_error	! 45
	.long	exception_error	! 46
	.long	exception_error	! 47
#endif
#if defined(CONFIG_SH_FPU)
	.long	do_fpu_state_restore	! 48	/* 800 */
	.long	do_fpu_state_restore	! 49	/* 820 */
#else
	.long	exception_error
	.long	exception_error
#endif
! From here on the layout is entirely subtype-specific.
#if defined(CONFIG_CPU_SUBTYPE_SH7751)
	.long	exception_error		/* 840 */
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error		/* 900 */
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	do_IRQ	! PCI serr	/* A00 */
	.long	do_IRQ	!     dma3
	.long	do_IRQ	!     dma2
	.long	do_IRQ	!     dma1
	.long	do_IRQ	!     dma0
	.long	do_IRQ	!     pwon
	.long	do_IRQ	!     pwdwn
	.long	do_IRQ	!     err
	.long	do_IRQ	! TMU3 tuni3	/* B00 */
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	do_IRQ	! TMU4 tuni4	/* B80 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
	.long	do_IRQ	! IRQ irq6	/* 840 */
	.long	do_IRQ	!     irq7
	.long	do_IRQ	! SCIF eri0
	.long	do_IRQ	!      rxi0
	.long	do_IRQ	!      bri0
	.long	do_IRQ	!      txi0
	.long	do_IRQ	! HCAN2 cani0	/* 900 */
	.long	do_IRQ	!       cani1
	.long	do_IRQ	! SSI   ssii0
	.long	do_IRQ	!       ssii1
	.long	do_IRQ	! HAC   haci0
	.long	do_IRQ	!       haci1
	.long	do_IRQ	! IIC   iici0
	.long	do_IRQ	!       iici1
	.long	do_IRQ	! USB   usbi	/* A00 */
	.long	do_IRQ	! LCDC  vint
	.long	exception_error
	.long	exception_error
	.long	do_IRQ	! DMABRG dmabrgi0
	.long	do_IRQ	!        dmabrgi1
	.long	do_IRQ	!        dmabrgi2
	.long	exception_error
	.long	do_IRQ	! SCIF  eri1	/* B00 */
	.long	do_IRQ	!       rxi1
	.long	do_IRQ	!       bri1
	.long	do_IRQ	!       txi1
	.long	do_IRQ	!       eri2
	.long	do_IRQ	!       rxi2
	.long	do_IRQ	!       bri2
	.long	do_IRQ	!       txi2
	.long	do_IRQ	! SIM   simeri	/* C00 */
	.long	do_IRQ	!       simrxi
	.long	do_IRQ	!       simtxi
	.long	do_IRQ	!       simtei
	.long	do_IRQ	! HSPI  spii
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	do_IRQ	! MMCIF mmci0	/* D00 */
	.long	do_IRQ	!       mmci1
	.long	do_IRQ	!       mmci2
	.long	do_IRQ	!       mmci3
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error		/* E00 */
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	do_IRQ	! MFI mfii
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	exception_error		/* F00 */
	.long	exception_error
	.long	exception_error
	.long	exception_error
	.long	do_IRQ	! ADC adi
	.long	do_IRQ	! CMT cmti	/* FA0 */
#elif defined(CONFIG_CPU_SUBTYPE_SH73180)
! SH73180 routes every remaining slot through do_IRQ.
	.long	do_IRQ	!  50 0x840
	.long	do_IRQ	!  51 0x860
	.long	do_IRQ	!  52 0x880
	.long	do_IRQ	!  53 0x8a0
	.long	do_IRQ	!  54 0x8c0
	.long	do_IRQ	!  55 0x8e0
	.long	do_IRQ	!  56 0x900
	.long	do_IRQ	!  57 0x920
	.long	do_IRQ	!  58 0x940
	.long	do_IRQ	!  59 0x960
	.long	do_IRQ	!  60 0x980
	.long	do_IRQ	!  61 0x9a0
	.long	do_IRQ	!  62 0x9c0
	.long	do_IRQ	!  63 0x9e0
	.long	do_IRQ	!  64 0xa00
	.long	do_IRQ	!  65 0xa20
	.long	do_IRQ	!  66 0xa40
	.long	do_IRQ	!  67 0xa60
	.long	do_IRQ	!  68 0xa80
	.long	do_IRQ	!  69 0xaa0
	.long	do_IRQ	!  70 0xac0
	.long	do_IRQ	!  71 0xae0
	.long	do_IRQ	!  72 0xb00
	.long	do_IRQ	!  73 0xb20
	.long	do_IRQ	!  74 0xb40
	.long	do_IRQ	!  75 0xb60
	.long	do_IRQ	!  76 0xb80
	.long	do_IRQ	!  77 0xba0
	.long	do_IRQ	!  78 0xbc0
	.long	do_IRQ	!  79 0xbe0
	.long	do_IRQ	!  80 0xc00
	.long	do_IRQ	!  81 0xc20
	.long	do_IRQ	!  82 0xc40
	.long	do_IRQ	!  83 0xc60
	.long	do_IRQ	!  84 0xc80
	.long	do_IRQ	!  85 0xca0
	.long	do_IRQ	!  86 0xcc0
	.long	do_IRQ	!  87 0xce0
	.long	do_IRQ	!  88 0xd00
	.long	do_IRQ	!  89 0xd20
	.long	do_IRQ	!  90 0xd40
	.long	do_IRQ	!  91 0xd60
	.long	do_IRQ	!  92 0xd80
	.long	do_IRQ	!  93 0xda0
	.long	do_IRQ	!  94 0xdc0
	.long	do_IRQ	!  95 0xde0
	.long	do_IRQ	!  96 0xe00
	.long	do_IRQ	!  97 0xe20
	.long	do_IRQ	!  98 0xe40
	.long	do_IRQ	!  99 0xe60
	.long	do_IRQ	! 100 0xe80
	.long	do_IRQ	! 101 0xea0
	.long	do_IRQ	! 102 0xec0
	.long	do_IRQ	! 103 0xee0
	.long	do_IRQ	! 104 0xf00
	.long	do_IRQ	! 105 0xf20
	.long	do_IRQ	! 106 0xf40
	.long	do_IRQ	! 107 0xf60
	.long	do_IRQ	! 108 0xf80
#elif defined(CONFIG_CPU_SUBTYPE_ST40STB1)
	.long	exception_error	!  50 0x840
	.long	exception_error	!  51 0x860
	.long	exception_error	!  52 0x880
	.long	exception_error	!  53 0x8a0
	.long	exception_error	!  54 0x8c0
	.long	exception_error	!  55 0x8e0
	.long	exception_error	!  56 0x900
	.long	exception_error	!  57 0x920
	.long	exception_error	!  58 0x940
	.long	exception_error	!  59 0x960
	.long	exception_error	!  60 0x980
	.long	exception_error	!  61 0x9a0
	.long	exception_error	!  62 0x9c0
	.long	exception_error	!  63 0x9e0
	.long	do_IRQ	!  64 0xa00 PCI serr
	.long	do_IRQ	!  65 0xa20     err
	.long	do_IRQ	!  66 0xa40     ad
	.long	do_IRQ	!  67 0xa60     pwr_dwn
	.long	exception_error	!  68 0xa80
	.long	exception_error	!  69 0xaa0
	.long	exception_error	!  70 0xac0
	.long	exception_error	!  71 0xae0
	.long	do_IRQ	!  72 0xb00 DMA INT0
	.long	do_IRQ	!  73 0xb20     INT1
	.long	do_IRQ	!  74 0xb40     INT2
	.long	do_IRQ	!  75 0xb60     INT3
	.long	do_IRQ	!  76 0xb80     INT4
	.long	exception_error	!  77 0xba0
	.long	do_IRQ	!  78 0xbc0 DMA ERR
	.long	exception_error	!  79 0xbe0
	.long	do_IRQ	!  80 0xc00 PIO0
	.long	do_IRQ	!  81 0xc20 PIO1
	.long	do_IRQ	!  82 0xc40 PIO2
	.long	exception_error	!  83 0xc60
	.long	exception_error	!  84 0xc80
	.long	exception_error	!  85 0xca0
	.long	exception_error	!  86 0xcc0
	.long	exception_error	!  87 0xce0
	.long	exception_error	!  88 0xd00
	.long	exception_error	!  89 0xd20
	.long	exception_error	!  90 0xd40
	.long	exception_error	!  91 0xd60
	.long	exception_error	!  92 0xd80
	.long	exception_error	!  93 0xda0
	.long	exception_error	!  94 0xdc0
	.long	exception_error	!  95 0xde0
	.long	exception_error	!  96 0xe00
	.long	exception_error	!  97 0xe20
	.long	exception_error	!  98 0xe40
	.long	exception_error	!  99 0xe60
	.long	exception_error	! 100 0xe80
	.long	exception_error	! 101 0xea0
	.long	exception_error	! 102 0xec0
	.long	exception_error	! 103 0xee0
	.long	exception_error	! 104 0xf00
	.long	exception_error	! 105 0xf20
	.long	exception_error	! 106 0xf40
	.long	exception_error	! 107 0xf60
	.long	exception_error	! 108 0xf80
	.long	exception_error	! 109 0xfa0
	.long	exception_error	! 110 0xfc0
	.long	exception_error	! 111 0xfe0
	.long	do_IRQ	! 112 0x1000 Mailbox
	.long	exception_error	! 113 0x1020
	.long	exception_error	! 114 0x1040
	.long	exception_error	! 115 0x1060
	.long	exception_error	! 116 0x1080
	.long	exception_error	! 117 0x10a0
	.long	exception_error	! 118 0x10c0
	.long	exception_error	! 119 0x10e0
	.long	exception_error	! 120 0x1100
	.long	exception_error	! 121 0x1120
	.long	exception_error	! 122 0x1140
	.long	exception_error	! 123 0x1160
	.long	exception_error	! 124 0x1180
	.long	exception_error	! 125 0x11a0
	.long	exception_error	! 126 0x11c0
	.long	exception_error	! 127 0x11e0
	.long	exception_error	! 128 0x1200
	.long	exception_error	! 129 0x1220
	.long	exception_error	! 130 0x1240
	.long	exception_error	! 131 0x1260
	.long	exception_error	! 132 0x1280
	.long	exception_error	! 133 0x12a0
	.long	exception_error	! 134 0x12c0
	.long	exception_error	! 135 0x12e0
	.long	exception_error	! 136 0x1300
	.long	exception_error	! 137 0x1320
	.long	exception_error	! 138 0x1340
	.long	exception_error	! 139 0x1360
	.long	do_IRQ	! 140 0x1380 EMPI INV_ADDR
	.long	exception_error	! 141 0x13a0
	.long	exception_error	! 142 0x13c0
	.long	exception_error	! 143 0x13e0
#endif

View File

@@ -0,0 +1,335 @@
/* $Id: fpu.c,v 1.4 2004/01/13 05:52:11 kkojima Exp $
*
* linux/arch/sh/kernel/fpu.c
*
* Save/restore floating point context for signal handlers.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*
* FIXME! These routines can be optimized in big endian case.
*/
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/processor.h>
#include <asm/io.h>
/* The PR (precision) bit in the FP Status Register must be clear when
* an frchg instruction is executed, otherwise the instruction is undefined.
* Executing frchg with PR set causes a trap on some SH4 implementations.
*/
#define FPSCR_RCHG 0x00000000
/*
* Save FPU registers onto task structure.
* Assume called with FPU enabled (SR.FD=0).
*/
/*
 * Save the FPU context (both banks of fr0-fr15, fpscr, fpul) into
 * tsk->thread.fpu.hard and release FPU ownership.
 *
 * Assumes it is called with the FPU enabled (SR.FD = 0).
 *
 * Registers are stored with pre-decrement addressing starting just past
 * &tsk->thread.fpu.hard.status, so the resulting memory layout matches
 * what restore_fpu() reads back with post-increment.
 */
void
save_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
	unsigned long dummy;

	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
	enable_fpu();
	asm volatile("sts.l	fpul, @-%0\n\t"
		     "sts.l	fpscr, @-%0\n\t"
		     "lds	%2, fpscr\n\t"	/* FPSCR.PR must be clear before frchg */
		     "frchg\n\t"		/* save the other register bank first */
		     "fmov.s	fr15, @-%0\n\t"
		     "fmov.s	fr14, @-%0\n\t"
		     "fmov.s	fr13, @-%0\n\t"
		     "fmov.s	fr12, @-%0\n\t"
		     "fmov.s	fr11, @-%0\n\t"
		     "fmov.s	fr10, @-%0\n\t"
		     "fmov.s	fr9, @-%0\n\t"
		     "fmov.s	fr8, @-%0\n\t"
		     "fmov.s	fr7, @-%0\n\t"
		     "fmov.s	fr6, @-%0\n\t"
		     "fmov.s	fr5, @-%0\n\t"
		     "fmov.s	fr4, @-%0\n\t"
		     "fmov.s	fr3, @-%0\n\t"
		     "fmov.s	fr2, @-%0\n\t"
		     "fmov.s	fr1, @-%0\n\t"
		     "fmov.s	fr0, @-%0\n\t"
		     "frchg\n\t"		/* back to the original bank */
		     "fmov.s	fr15, @-%0\n\t"
		     "fmov.s	fr14, @-%0\n\t"
		     "fmov.s	fr13, @-%0\n\t"
		     "fmov.s	fr12, @-%0\n\t"
		     "fmov.s	fr11, @-%0\n\t"
		     "fmov.s	fr10, @-%0\n\t"
		     "fmov.s	fr9, @-%0\n\t"
		     "fmov.s	fr8, @-%0\n\t"
		     "fmov.s	fr7, @-%0\n\t"
		     "fmov.s	fr6, @-%0\n\t"
		     "fmov.s	fr5, @-%0\n\t"
		     "fmov.s	fr4, @-%0\n\t"
		     "fmov.s	fr3, @-%0\n\t"
		     "fmov.s	fr2, @-%0\n\t"
		     "fmov.s	fr1, @-%0\n\t"
		     "fmov.s	fr0, @-%0\n\t"
		     "lds	%3, fpscr\n\t"	/* leave FPSCR in its initial state */
		     : "=r" (dummy)
		     : "0" ((char *)(&tsk->thread.fpu.hard.status)),
		       "r" (FPSCR_RCHG),
		       "r" (FPSCR_INIT)
		     : "memory");

	disable_fpu();
	release_fpu(regs);
}
/*
 * Reload the FPU context from tsk->thread.fpu, the exact inverse of
 * save_fpu(): registers are read back with post-increment addressing,
 * both banks, then fpscr and fpul.
 *
 * Assumes FPU access is permitted on entry (caller uses grab_fpu()).
 */
static void
restore_fpu(struct task_struct *tsk)
{
	unsigned long dummy;

	enable_fpu();
	asm volatile("lds	%2, fpscr\n\t"	/* FPSCR.PR must be clear before frchg */
		     "fmov.s	@%0+, fr0\n\t"
		     "fmov.s	@%0+, fr1\n\t"
		     "fmov.s	@%0+, fr2\n\t"
		     "fmov.s	@%0+, fr3\n\t"
		     "fmov.s	@%0+, fr4\n\t"
		     "fmov.s	@%0+, fr5\n\t"
		     "fmov.s	@%0+, fr6\n\t"
		     "fmov.s	@%0+, fr7\n\t"
		     "fmov.s	@%0+, fr8\n\t"
		     "fmov.s	@%0+, fr9\n\t"
		     "fmov.s	@%0+, fr10\n\t"
		     "fmov.s	@%0+, fr11\n\t"
		     "fmov.s	@%0+, fr12\n\t"
		     "fmov.s	@%0+, fr13\n\t"
		     "fmov.s	@%0+, fr14\n\t"
		     "fmov.s	@%0+, fr15\n\t"
		     "frchg\n\t"		/* now fill the other bank */
		     "fmov.s	@%0+, fr0\n\t"
		     "fmov.s	@%0+, fr1\n\t"
		     "fmov.s	@%0+, fr2\n\t"
		     "fmov.s	@%0+, fr3\n\t"
		     "fmov.s	@%0+, fr4\n\t"
		     "fmov.s	@%0+, fr5\n\t"
		     "fmov.s	@%0+, fr6\n\t"
		     "fmov.s	@%0+, fr7\n\t"
		     "fmov.s	@%0+, fr8\n\t"
		     "fmov.s	@%0+, fr9\n\t"
		     "fmov.s	@%0+, fr10\n\t"
		     "fmov.s	@%0+, fr11\n\t"
		     "fmov.s	@%0+, fr12\n\t"
		     "fmov.s	@%0+, fr13\n\t"
		     "fmov.s	@%0+, fr14\n\t"
		     "fmov.s	@%0+, fr15\n\t"
		     "frchg\n\t"		/* restore the original bank selection */
		     "lds.l	@%0+, fpscr\n\t"
		     "lds.l	@%0+, fpul\n\t"
		     : "=r" (dummy)
		     : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
		     : "memory");
	disable_fpu();
}
/*
 * Load the FPU with signalling NaNs. The bit pattern we're using has the
 * property that no matter whether considered as single or as double
 * precision it represents a signalling NaN.
 *
 * (Loading fpul with 0 and storing it to every register gives each fr a
 * well-defined initial value; any use before an explicit write then has
 * deterministic behaviour.)
 */
static void
fpu_init(void)
{
	enable_fpu();
	asm volatile("lds	%0, fpul\n\t"
		     "lds	%1, fpscr\n\t"	/* FPSCR.PR must be clear before frchg */
		     "fsts	fpul, fr0\n\t"
		     "fsts	fpul, fr1\n\t"
		     "fsts	fpul, fr2\n\t"
		     "fsts	fpul, fr3\n\t"
		     "fsts	fpul, fr4\n\t"
		     "fsts	fpul, fr5\n\t"
		     "fsts	fpul, fr6\n\t"
		     "fsts	fpul, fr7\n\t"
		     "fsts	fpul, fr8\n\t"
		     "fsts	fpul, fr9\n\t"
		     "fsts	fpul, fr10\n\t"
		     "fsts	fpul, fr11\n\t"
		     "fsts	fpul, fr12\n\t"
		     "fsts	fpul, fr13\n\t"
		     "fsts	fpul, fr14\n\t"
		     "fsts	fpul, fr15\n\t"
		     "frchg\n\t"		/* initialize the other bank too */
		     "fsts	fpul, fr0\n\t"
		     "fsts	fpul, fr1\n\t"
		     "fsts	fpul, fr2\n\t"
		     "fsts	fpul, fr3\n\t"
		     "fsts	fpul, fr4\n\t"
		     "fsts	fpul, fr5\n\t"
		     "fsts	fpul, fr6\n\t"
		     "fsts	fpul, fr7\n\t"
		     "fsts	fpul, fr8\n\t"
		     "fsts	fpul, fr9\n\t"
		     "fsts	fpul, fr10\n\t"
		     "fsts	fpul, fr11\n\t"
		     "fsts	fpul, fr12\n\t"
		     "fsts	fpul, fr13\n\t"
		     "fsts	fpul, fr14\n\t"
		     "fsts	fpul, fr15\n\t"
		     "frchg\n\t"
		     "lds	%2, fpscr\n\t"	/* leave FPSCR in its initial state */
		     : /* no output */
		     : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
	disable_fpu();
}
/**
 * denormal_to_double - Given denormalized float number,
 *                      store double float
 *
 * @fpu: Pointer to sh_fpu_hard_struct
 * @n: Index to FP register
 *
 * Converts the single-precision denormal in @fpu->fpul to the equivalent
 * IEEE-754 double and stores its two 32-bit halves into fp_regs[n] and
 * fp_regs[n+1]. Numbers that are not non-zero denormals are left alone.
 */
static void
denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
{
	unsigned long mant = fpu->fpul;
	unsigned long hi, lo;
	int exponent = 1023 - 126;

	/* Only act on non-zero singles with a biased exponent of zero. */
	if (mant == 0 || (mant & 0x7f800000) != 0)
		return;

	hi = mant & 0x80000000;			/* preserve the sign bit */

	/* Normalize: shift left until the implicit leading 1 surfaces. */
	for (; (mant & 0x00800000) == 0; mant <<= 1)
		exponent--;
	mant &= 0x007fffff;			/* drop the hidden bit */

	/* Repack as double: 11-bit exponent, 52-bit mantissa split 20/32. */
	hi |= (exponent << 20) | (mant >> 3);
	lo = mant << 29;

	fpu->fp_regs[n] = hi;
	fpu->fp_regs[n + 1] = lo;
}
/**
 * ieee_fpe_handler - Handle denormalized number exception
 *
 * @regs: Pointer to register structure
 *
 * Returns 1 when it's handled (should not cause exception).
 *
 * Decodes the instruction at regs->pc to find the FP instruction that
 * actually faulted (it may sit in the delay slot of a branch) and the pc
 * at which execution should resume. If that instruction is fcnvsd on a
 * denormalized single, the conversion is performed by hand and the task
 * resumed; otherwise SIGFPE is delivered.
 */
static int
ieee_fpe_handler (struct pt_regs *regs)
{
	unsigned short insn = *(unsigned short *) regs->pc;
	unsigned short finsn;
	unsigned long nextpc;
	/* The four nibbles of the instruction word, high to low. */
	int nib[4] = {
		(insn >> 12) & 0xf,
		(insn >> 8) & 0xf,
		(insn >> 4) & 0xf,
		insn & 0xf};

	/* Call-type branches also update PR, just as the hardware would. */
	if (nib[0] == 0xb ||
	    (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
		regs->pr = regs->pc + 4;

	if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
		/* 12-bit signed displacement, scaled by 2 (sign-extended
		 * via the shift-left-4 / arithmetic-shift-right-3 trick). */
		nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
		finsn = *(unsigned short *) (regs->pc + 2);
	} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
		if (regs->sr & 1)	/* T bit */
			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
		else
			nextpc = regs->pc + 4;
		finsn = *(unsigned short *) (regs->pc + 2);
	} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
		if (regs->sr & 1)	/* T bit */
			nextpc = regs->pc + 4;
		else
			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
		finsn = *(unsigned short *) (regs->pc + 2);
	} else if (nib[0] == 0x4 && nib[3] == 0xb &&
		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
		nextpc = regs->regs[nib[1]];
		finsn = *(unsigned short *) (regs->pc + 2);
	} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
		nextpc = regs->pc + 4 + regs->regs[nib[1]];
		finsn = *(unsigned short *) (regs->pc + 2);
	} else if (insn == 0x000b) { /* rts */
		nextpc = regs->pr;
		finsn = *(unsigned short *) (regs->pc + 2);
	} else {
		/* Not a branch: the faulting FP instruction is insn itself. */
		nextpc = regs->pc + 2;
		finsn = insn;
	}

	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
		struct task_struct *tsk = current;

		save_fpu(tsk, regs);
		if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) {
			/* FPU error (bit 17 set): operand was a denormal;
			 * convert it by hand and resume the task. */
			denormal_to_double (&tsk->thread.fpu.hard,
					    (finsn >> 8) & 0xf);
			tsk->thread.fpu.hard.fpscr &=
				~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
			grab_fpu(regs);
			restore_fpu(tsk);
			set_tsk_thread_flag(tsk, TIF_USEDFPU);
		} else {
			tsk->thread.trap_no = 11;
			tsk->thread.error_code = 0;
			force_sig(SIGFPE, tsk);
		}

		regs->pc = nextpc;
		return 1;
	}

	return 0;
}
/*
 * FPU exception entry point (vector 120). Tries the IEEE fix-up path
 * first; if the fault is not a repairable denormal case, the faulting
 * instruction is skipped and SIGFPE delivered to the current task.
 */
asmlinkage void
do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7,
	     struct pt_regs regs)
{
	struct task_struct *tsk = current;

	if (ieee_fpe_handler (&regs))
		return;

	regs.pc += 2;	/* step over the faulting instruction */
	save_fpu(tsk, &regs);
	tsk->thread.trap_no = 11;
	tsk->thread.error_code = 0;
	force_sig(SIGFPE, tsk);
}
/*
 * FPU-disable exception entry point: the current task touched the FPU
 * while it was disabled. Grant it the FPU, restoring its saved context
 * or initializing a fresh one on first use.
 */
asmlinkage void
do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
		     unsigned long r7, struct pt_regs regs)
{
	struct task_struct *tsk = current;

	grab_fpu(&regs);
	if (!user_mode(&regs)) {
		/* Kernel code must never use the FPU. */
		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
		return;
	}

	if (used_math()) {
		/* Using the FPU again. */
		restore_fpu(tsk);
	} else	{
		/* First time FPU user. */
		fpu_init();
		set_used_math();
	}
	set_tsk_thread_flag(tsk, TIF_USEDFPU);
}

View File

@@ -0,0 +1,222 @@
/*
* linux/arch/sh/kernel/irq_intc2.c
*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Interrupt handling for INTC2-based IRQ.
*
* These are the "new Hitachi style" interrupts, as present on the
* Hitachi 7751 and the STM ST40 STB1.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/machvec.h>
/* Per-IRQ bookkeeping: where this source's mask bit lives. */
struct intc2_data {
	unsigned char msk_offset;	/* byte offset into the INTMSK/INTMSKCLR banks */
	unsigned char msk_shift;	/* bit position within that mask register */
#ifdef CONFIG_CPU_SUBTYPE_ST40
	/* Optional per-IRQ termination hook, run at end-of-interrupt. */
	int (*clear_irq) (int);
#endif
};

/* Indexed by (irq - INTC2_FIRST_IRQ); filled in by make_intc2_irq(). */
static struct intc2_data intc2_data[NR_INTC2_IRQS];

static void enable_intc2_irq(unsigned int irq);
static void disable_intc2_irq(unsigned int irq);

/* shutdown is same as "disable" */
#define shutdown_intc2_irq disable_intc2_irq

static void mask_and_ack_intc2(unsigned int);
static void end_intc2_irq(unsigned int irq);
/* Startup hook: just enable the source; INTC2 never latches anything. */
static unsigned int startup_intc2_irq(unsigned int irq)
{
	enable_intc2_irq(irq);
	return 0; /* never anything pending */
}
/* hw_interrupt_type ops for INTC2 sources (positional initializer). */
static struct hw_interrupt_type intc2_irq_type = {
	"INTC2-IRQ",		/* typename */
	startup_intc2_irq,	/* startup */
	shutdown_intc2_irq,	/* shutdown */
	enable_intc2_irq,	/* enable */
	disable_intc2_irq,	/* disable */
	mask_and_ack_intc2,	/* ack */
	end_intc2_irq		/* end */
};
static void disable_intc2_irq(unsigned int irq)
{
int irq_offset = irq - INTC2_FIRST_IRQ;
int msk_shift, msk_offset;
// Sanity check
if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
return;
msk_shift = intc2_data[irq_offset].msk_shift;
msk_offset = intc2_data[irq_offset].msk_offset;
ctrl_outl(1<<msk_shift,
INTC2_BASE+INTC2_INTMSK_OFFSET+msk_offset);
}
static void enable_intc2_irq(unsigned int irq)
{
int irq_offset = irq - INTC2_FIRST_IRQ;
int msk_shift, msk_offset;
/* Sanity check */
if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
return;
msk_shift = intc2_data[irq_offset].msk_shift;
msk_offset = intc2_data[irq_offset].msk_offset;
ctrl_outl(1<<msk_shift,
INTC2_BASE+INTC2_INTMSKCLR_OFFSET+msk_offset);
}
/* Acknowledge by masking the source; it is re-enabled in end_intc2_irq(). */
static void mask_and_ack_intc2(unsigned int irq)
{
	disable_intc2_irq(irq);
}
/*
 * End-of-interrupt: re-enable the source unless it has been disabled or
 * is still in progress, then run any registered per-IRQ clear hook.
 */
static void end_intc2_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		enable_intc2_irq(irq);

#ifdef CONFIG_CPU_SUBTYPE_ST40
	if (intc2_data[irq - INTC2_FIRST_IRQ].clear_irq)
		intc2_data[irq - INTC2_FIRST_IRQ].clear_irq (irq);
#endif
}
/*
 * Setup an INTC2 style interrupt.
 * NOTE: Unlike IPR interrupts, parameters are not shifted by this code,
 * allowing the use of the numbers straight out of the datasheet.
 * For example:
 *    PIO1 which is INTPRI00[19,16] and INTMSK00[13]
 * would be:               ^     ^              ^  ^
 *                         |     |              |  |
 *    make_intc2_irq(84,   0,   16,             0, 13, priority);
 *
 * @irq:        Linux IRQ number (>= INTC2_FIRST_IRQ)
 * @ipr_offset: byte offset of the INTPRI register for this source
 * @ipr_shift:  bit position of its 4-bit priority field
 * @msk_offset: byte offset of its INTMSK/INTMSKCLR registers
 * @msk_shift:  bit position of its mask bit
 * @priority:   4-bit interrupt priority to program
 *
 * The IRQ is registered with the intc2 handler ops and left masked.
 */
void make_intc2_irq(unsigned int irq,
		    unsigned int ipr_offset, unsigned int ipr_shift,
		    unsigned int msk_offset, unsigned int msk_shift,
		    unsigned int priority)
{
	int irq_offset = irq - INTC2_FIRST_IRQ;
	/*
	 * Fix: was "unsigned int flags". local_irq_save() must be handed an
	 * unsigned long; on SH it stores the full SR word there.
	 */
	unsigned long flags;
	unsigned long ipr;

	/* Sanity check */
	if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
		return;

	disable_irq_nosync(irq);

	/* Fill the data we need */
	intc2_data[irq_offset].msk_offset = msk_offset;
	intc2_data[irq_offset].msk_shift  = msk_shift;
#ifdef CONFIG_CPU_SUBTYPE_ST40
	intc2_data[irq_offset].clear_irq = NULL;
#endif

	/* Set the priority level (4-bit field at ipr_shift) */
	local_irq_save(flags);

	ipr = ctrl_inl(INTC2_BASE + INTC2_INTPRI_OFFSET + ipr_offset);
	ipr &= ~(0xf << ipr_shift);
	ipr |= priority << ipr_shift;
	ctrl_outl(ipr, INTC2_BASE + INTC2_INTPRI_OFFSET + ipr_offset);

	local_irq_restore(flags);

	irq_desc[irq].handler = &intc2_irq_type;

	/* Leave the source masked until something calls enable_irq(). */
	disable_intc2_irq(irq);
}
#ifdef CONFIG_CPU_SUBTYPE_ST40
/* Static description of one INTC2 source, matching make_intc2_irq()'s
 * register-offset/bit-position parameters. */
struct intc2_init {
	unsigned short irq;
	unsigned char ipr_offset, ipr_shift;
	unsigned char msk_offset, msk_shift;
};

/* Board-level INTC2 sources; numbers taken straight from the datasheet. */
static struct intc2_init intc2_init_data[]  __initdata = {
	{64,  0,  0, 0,  0},	/* PCI serr */
	{65,  0,  4, 0,  1},	/* PCI err */
	{66,  0,  4, 0,  2},	/* PCI ad */
	{67,  0,  4, 0,  3},	/* PCI pwd down */
	{72,  0,  8, 0,  5},	/* DMAC INT0 */
	{73,  0,  8, 0,  6},	/* DMAC INT1 */
	{74,  0,  8, 0,  7},	/* DMAC INT2 */
	{75,  0,  8, 0,  8},	/* DMAC INT3 */
	{76,  0,  8, 0,  9},	/* DMAC INT4 */
	{78,  0,  8, 0, 11},	/* DMAC ERR */
	{80,  0, 12, 0, 12},	/* PIO0 */
	{84,  0, 16, 0, 13},	/* PIO1 */
	{88,  0, 20, 0, 14},	/* PIO2 */
	{112, 4,  0, 4,  0},	/* Mailbox */
#ifdef CONFIG_CPU_SUBTYPE_ST40GX1
	{116, 4,  4, 4,  4},	/* SSC0 */
	{120, 4,  8, 4,  8},	/* IR Blaster */
	{124, 4, 12, 4, 12},	/* USB host */
	{128, 4, 16, 4, 16},	/* Video processor BLITTER */
	{132, 4, 20, 4, 20},	/* UART0 */
	{134, 4, 20, 4, 22},	/* UART2 */
	{136, 4, 24, 4, 24},	/* IO_PIO0 */
	{140, 4, 28, 4, 28},	/* EMPI */
	{144, 8,  0, 8,  0},	/* MAFE */
	{148, 8,  4, 8,  4},	/* PWM */
	{152, 8,  8, 8,  8},	/* SSC1 */
	{156, 8, 12, 8, 12},	/* IO_PIO1 */
	{160, 8, 16, 8, 16},	/* USB target */
	{164, 8, 20, 8, 20},	/* UART1 */
	{168, 8, 24, 8, 24},	/* Teletext */
	{172, 8, 28, 8, 28},	/* VideoSync VTG */
	{173, 8, 28, 8, 29},	/* VideoSync DVP0 */
	{174, 8, 28, 8, 30},	/* VideoSync DVP1 */
#endif
};
/*
 * Register every statically-known INTC2 source with a default
 * priority of 13.
 */
void __init init_IRQ_intc2(void)
{
	struct intc2_init *p;

	/*
	 * Fix: was KERN_ALERT — this is routine boot-time logging, not an
	 * "action must be taken immediately" condition.
	 */
	printk(KERN_INFO "init_IRQ_intc2\n");

	for (p = intc2_init_data;
	     p < intc2_init_data + ARRAY_SIZE(intc2_init_data); p++)
		make_intc2_irq(p->irq, p->ipr_offset, p->ipr_shift,
			       p->msk_offset, p->msk_shift, 13);
}
/* Adds a termination callback to the interrupt */
void intc2_add_clear_irq(int irq, int (*fn)(int))
{
if (irq < INTC2_FIRST_IRQ)
return;
intc2_data[irq - INTC2_FIRST_IRQ].clear_irq = fn;
}
#endif /* CONFIG_CPU_SUBTYPE_ST40 */

View File

@@ -0,0 +1,138 @@
/*
* arch/sh/kernel/cpu/sh4/probe.c
*
* CPU Subtype Probing for SH-4.
*
* Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
/*
 * Probe the SH-4 subtype from the version/revision registers
 * (CCN PVR/PRR) and derive the cache geometry, using CVR on
 * multi-way parts. Returns 0 unconditionally.
 */
int __init detect_cpu_and_cache_system(void)
{
	unsigned long pvr, prr, cvr;
	unsigned long size;

	/* Way size decode, indexed by the 4-bit size code taken from CVR.
	 * Unlisted codes map to 0. */
	static unsigned long sizes[16] = {
		[1] = (1 << 12),
		[2] = (1 << 13),
		[4] = (1 << 14),
		[8] = (1 << 15),
		[9] = (1 << 16)
	};

	pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffff;
	prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff;
	cvr = (ctrl_inl(CCN_CVR));

	/*
	 * Setup some sane SH-4 defaults for the icache
	 * (direct-mapped, 8K: 256 sets of 32-byte lines)
	 */
	cpu_data->icache.way_incr	= (1 << 13);
	cpu_data->icache.entry_shift	= 5;
	cpu_data->icache.entry_mask	= 0x1fe0;
	cpu_data->icache.sets		= 256;
	cpu_data->icache.ways		= 1;
	cpu_data->icache.linesz		= L1_CACHE_BYTES;

	/*
	 * And again for the dcache ..
	 * (direct-mapped, 16K: 512 sets of 32-byte lines)
	 */
	cpu_data->dcache.way_incr	= (1 << 14);
	cpu_data->dcache.entry_shift	= 5;
	cpu_data->dcache.entry_mask	= 0x3fe0;
	cpu_data->dcache.sets		= 512;
	cpu_data->dcache.ways		= 1;
	cpu_data->dcache.linesz		= L1_CACHE_BYTES;

	/* Set the FPU flag, virtually all SH-4's have one */
	cpu_data->flags |= CPU_HAS_FPU;

	/*
	 * Probe the underlying processor version/revision and
	 * adjust cpu_data setup accordingly.
	 */
	switch (pvr) {
	case 0x205:
		cpu_data->type = CPU_SH7750;
		cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER;
		break;
	case 0x206:
		cpu_data->type = CPU_SH7750S;
		cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER;
		break;
	case 0x1100:
		cpu_data->type = CPU_SH7751;
		break;
	case 0x2000:
		cpu_data->type = CPU_SH73180;
		cpu_data->icache.ways = 4;
		cpu_data->dcache.ways = 4;
		cpu_data->flags &= ~CPU_HAS_FPU;
		break;
	case 0x8000:
		cpu_data->type = CPU_ST40RA;
		break;
	case 0x8100:
		cpu_data->type = CPU_ST40GX1;
		break;
	case 0x700:
		cpu_data->type = CPU_SH4_501;
		cpu_data->icache.ways = 2;
		cpu_data->dcache.ways = 2;

		/* No FPU on the SH4-500 series.. */
		cpu_data->flags &= ~CPU_HAS_FPU;
		break;
	case 0x600:
		cpu_data->type = CPU_SH4_202;
		cpu_data->icache.ways = 2;
		cpu_data->dcache.ways = 2;
		break;
	case 0x500 ... 0x501:
		/* Same PVR; PRR distinguishes the R-revision parts. */
		switch (prr) {
		case 0x10: cpu_data->type = CPU_SH7750R; break;
		case 0x11: cpu_data->type = CPU_SH7751R; break;
		case 0x50: cpu_data->type = CPU_SH7760;  break;
		}

		cpu_data->icache.ways = 2;
		cpu_data->dcache.ways = 2;

		break;
	default:
		cpu_data->type = CPU_SH_NONE;
		break;
	}

	/*
	 * On anything that's not a direct-mapped cache, look to the CVR
	 * for I/D-cache specifics.
	 */
	if (cpu_data->icache.ways > 1) {
		size = sizes[(cvr >> 20) & 0xf];
		cpu_data->icache.way_incr	= (size >> 1);
		cpu_data->icache.sets		= (size >> 6);
		cpu_data->icache.entry_mask	=
			(cpu_data->icache.way_incr - (1 << 5));
	}

	if (cpu_data->dcache.ways > 1) {
		size = sizes[(cvr >> 16) & 0xf];
		cpu_data->dcache.way_incr	= (size >> 1);
		cpu_data->dcache.sets		= (size >> 6);
		cpu_data->dcache.entry_mask	=
			(cpu_data->dcache.way_incr - (1 << 5));
	}

	return 0;
}

453
arch/sh/kernel/cpu/sh4/sq.c Normal file
View File

@@ -0,0 +1,453 @@
/*
* arch/sh/kernel/cpu/sq.c
*
* General management API for SH-4 integrated Store Queues
*
* Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
* Copyright (C) 2001, 2002 M. R. Brown
*
* Some of this code has been adopted directly from the old arch/sh/mm/sq.c
* hack that was part of the LinuxDC project. For all intents and purposes,
* this is a completely new interface that really doesn't have much in common
* with the old zone-based approach at all. In fact, it's only listed here for
* general completeness.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/cpu/sq.h>
static LIST_HEAD(sq_mapping_list);
static DEFINE_SPINLOCK(sq_mapping_lock);
/**
 * sq_flush - Flush (prefetch) the store queue cache
 * @addr: the store queue address to flush
 *
 * Executes a prefetch instruction on the specified store queue cache,
 * so that the cached data is written to physical memory.
 */
inline void sq_flush(void *addr)
{
	/* On SH-4, pref on a P4 store-queue address triggers the burst write. */
	__asm__ __volatile__ ("pref @%0" : : "r" (addr) : "memory");
}
/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 *
 * @len is taken in 32-byte (one SQ) units; a partial trailing unit is
 * not flushed.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	volatile unsigned long *sq = (unsigned long *)start;
	unsigned long dummy;

	/* Flush the queues: one pref per 32-byte store queue. */
	for (len >>= 5; len--; sq += 8)
		sq_flush((void *)sq);

	/* Wait for completion */
	dummy = ctrl_inl(P4SEG_STORE_QUE);

	/* NOTE(review): these writes appear to scrub the first longword of
	 * each of the two store queues after draining — confirm against the
	 * SH-4 store queue documentation. */
	ctrl_outl(0, P4SEG_STORE_QUE + 0);
	ctrl_outl(0, P4SEG_STORE_QUE + 8);
}
/*
 * Allocate and register a new SQ mapping descriptor covering
 * [virt, virt + size] -> phys, and push it onto the head of the
 * global mapping list.
 *
 * Returns the new mapping, or an ERR_PTR() on exhaustion/allocation
 * failure.
 *
 * NOTE(review): the stored length is size + 1 — presumably callers pass
 * an inclusive (length - 1) value; confirm against sq_remap()'s callers.
 * NOTE(review): list_add() is done without taking sq_mapping_lock —
 * verify that all callers serialize against each other.
 */
static struct sq_mapping *__sq_alloc_mapping(unsigned long virt, unsigned long phys, unsigned long size, const char *name)
{
	struct sq_mapping *map;

	if (virt + size > SQ_ADDRMAX)
		return ERR_PTR(-ENOSPC);

	map = kmalloc(sizeof(struct sq_mapping), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&map->list);

	map->sq_addr	= virt;
	map->addr	= phys;
	map->size	= size + 1;
	map->name	= name;

	list_add(&map->list, &sq_mapping_list);

	return map;
}
static unsigned long __sq_get_next_addr(void)
{
if (!list_empty(&sq_mapping_list)) {
struct list_head *pos, *tmp;
/*
* Read one off the list head, as it will have the highest
* mapped allocation. Set the next one up right above it.
*
* This is somewhat sub-optimal, as we don't look at
* gaps between allocations or anything lower then the
* highest-level allocation.
*
* However, in the interest of performance and the general
* lack of desire to do constant list rebalancing, we don't
* worry about it.
*/
list_for_each_safe(pos, tmp, &sq_mapping_list) {
struct sq_mapping *entry;
entry = list_entry(pos, typeof(*entry), list);
return entry->sq_addr + entry->size;
}
}
return P4SEG_STORE_QUE;
}
/**
 * __sq_remap - Perform a translation from the SQ to a phys addr
 * @map: sq mapping containing phys and store queue addresses.
 *
 * Maps the store queue address specified in the mapping to the physical
 * address specified in the mapping. Returns @map on success; with
 * CONFIG_MMU it may also return ERR_PTR(-ENOMEM) or NULL on failure
 * (NOTE(review): the two failure styles are inconsistent -- callers
 * must check for both).
 */
static struct sq_mapping *__sq_remap(struct sq_mapping *map)
{
	unsigned long flags, pteh, ptel;
	struct vm_struct *vma;
	pgprot_t pgprot;
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 *
	 * NOTE(review): these QACR writes happen even when CONFIG_MMU is
	 * set -- presumably harmless alongside the UTLB setup; confirm
	 * against the SH-4 manual.
	 */
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#ifdef CONFIG_MMU
	/*
	 * With an MMU on the other hand, things are slightly more involved.
	 * Namely, we have to have a direct mapping between the SQ addr and
	 * the associated physical address in the UTLB by way of setting up
	 * a virt<->phys translation by hand. We do this by simply specifying
	 * the SQ addr in UTLB.VPN and the associated physical address in
	 * UTLB.PPN.
	 *
	 * Notably, even though this is a special case translation, and some
	 * of the configuration bits are meaningless, we're still required
	 * to have a valid ASID context in PTEH.
	 *
	 * We could also probably get by without explicitly setting PTEA, but
	 * we do it here just for good measure.
	 */
	spin_lock_irqsave(&sq_mapping_lock, flags);
	/* VPN (SQ address) plus current ASID into PTEH. */
	pteh = map->sq_addr;
	ctrl_outl((pteh & MMU_VPN_MASK) | get_asid(), MMU_PTEH);
	/* PPN with non-cached protection bits into PTEL/PTEA. */
	ptel = map->addr & PAGE_MASK;
	ctrl_outl(((ptel >> 28) & 0xe) | (ptel & 0x1), MMU_PTEA);
	pgprot = pgprot_noncached(PAGE_KERNEL);
	ptel &= _PAGE_FLAGS_HARDWARE_MASK;
	ptel |= pgprot_val(pgprot);
	ctrl_outl(ptel, MMU_PTEL);
	/* Load the PTEH/PTEL pair into the UTLB. */
	__asm__ __volatile__ ("ldtlb" : : : "memory");
	spin_unlock_irqrestore(&sq_mapping_lock, flags);
	/*
	 * Next, we need to map ourselves in the kernel page table, so that
	 * future accesses after a TLB flush will be handled when we take a
	 * page fault.
	 *
	 * Theoretically we could just do this directly and not worry about
	 * setting up the translation by hand ahead of time, but for the
	 * cases where we want a one-shot SQ mapping followed by a quick
	 * writeout before we hit the TLB flush, we do it anyways. This way
	 * we at least save ourselves the initial page fault overhead.
	 */
	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return ERR_PTR(-ENOMEM);
	vma->phys_addr = map->addr;
	if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr,
			map->size, pgprot_val(pgprot))) {
		vunmap(vma->addr);
		return NULL;
	}
#endif /* CONFIG_MMU */
	return map;
}
/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the procfs interface.
 *
 * A pre-allocated and filled sq_mapping pointer is returned, and must be
 * cleaned up with a call to sq_unmap() when the user is done with the
 * mapping. Returns NULL on failure (invalid range, normal memory, or
 * allocation failure).
 */
struct sq_mapping *sq_remap(unsigned long phys, unsigned int size, const char *name)
{
	struct sq_mapping *map;
	unsigned long virt, end;
	unsigned int psz;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (!size || end < phys)
		return NULL;

	/* Don't allow anyone to remap normal memory.. */
	if (phys < virt_to_phys(high_memory))
		return NULL;

	phys &= PAGE_MASK;

	/* Page-align the length, then round up to whole pages for logging. */
	size = PAGE_ALIGN(end + 1) - phys;
	virt = __sq_get_next_addr();
	psz = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
	map = __sq_alloc_mapping(virt, phys, size, name);

	/*
	 * __sq_alloc_mapping() returns ERR_PTR() on failure; the previous
	 * code dereferenced the result unconditionally, oopsing on OOM or
	 * SQ address-space exhaustion.
	 */
	if (IS_ERR(map))
		return NULL;

	printk("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
	       map->name ? map->name : "???",
	       psz, psz == 1 ? " " : "s",
	       map->sq_addr, map->addr);

	return __sq_remap(map);
}
/**
 * sq_unmap - Unmap a Store Queue allocation
 * @map: Pre-allocated Store Queue mapping.
 *
 * Tears down a mapping previously created by sq_remap(): releases the
 * kernel-side virtual mapping (when one was set up), unlinks the entry
 * from the mapping list and frees it. Also discards the pte/UTLB
 * translation installed for the mapping.
 */
void sq_unmap(struct sq_mapping *map)
{
	unsigned long base = map->sq_addr & PAGE_MASK;

	/* Only kernel mappings above high_memory have a backing vmap. */
	if (map->sq_addr > (unsigned long)high_memory)
		vfree((void *)base);

	list_del(&map->list);
	kfree(map);
}
/**
 * sq_clear - Clear a store queue range
 * @addr: Address to start clearing from.
 * @len: Length to clear.
 *
 * A quick zero-fill implementation for clearing out memory that has been
 * remapped through the store queues.
 */
void sq_clear(unsigned long addr, unsigned int len)
{
	int i;
	/*
	 * Clear out both queues linearly.
	 *
	 * NOTE(review): the stride here is one byte (addr + i / addr + i + 8),
	 * so the eight 32-bit writes overlap each other and only the first
	 * few bytes of each queue are actually touched. A longword stride
	 * would seem more natural -- confirm the intended SQ write
	 * semantics against the SH-4 hardware manual before changing.
	 */
	for (i = 0; i < 8; i++) {
		ctrl_outl(0, addr + i + 0);
		ctrl_outl(0, addr + i + 8);
	}
	/* Push the zeroes out through the store queues. */
	sq_flush_range(addr, len);
}
/**
 * sq_vma_unmap - Unmap a VMA range
 * @area: VMA containing range.
 * @addr: Start of range.
 * @len: Length of range.
 *
 * Searches the sq_mapping_list for a mapping matching the sq addr @addr,
 * and subsequently frees up the entry. Further cleanup is done by generic
 * code.
 */
static void sq_vma_unmap(struct vm_area_struct *area,
			 unsigned long addr, size_t len)
{
	struct sq_mapping *map, *next;

	list_for_each_entry_safe(map, next, &sq_mapping_list, list) {
		if (map->sq_addr != addr)
			continue;

		/*
		 * The TLB flush is probably redundant -- generic code
		 * flushes when tearing down the rest of the VMA range --
		 * but it is kept for added sanity for the time being.
		 */
		__flush_tlb_page(get_asid(), map->sq_addr & PAGE_MASK);

		list_del(&map->list);
		kfree(map);
		return;
	}
}
/**
 * sq_vma_sync - Sync a VMA range
 * @area: VMA containing range.
 * @start: Start of range.
 * @len: Length of range.
 * @flags: Additional flags.
 *
 * Synchronizes an sq mapped range by flushing the store queue cache for
 * the duration of the mapping. Used internally for user mappings, which
 * must use msync() to prefetch the store queue cache. Always returns 0.
 */
static int sq_vma_sync(struct vm_area_struct *area,
		       unsigned long start, size_t len, unsigned int flags)
{
	/* A sync is simply a prefetch-flush of the whole range. */
	sq_flush_range(start, len);
	return 0;
}
/* VMA callbacks for userspace mappings created through sq_mmap(). */
static struct vm_operations_struct sq_vma_ops = {
	.unmap = sq_vma_unmap,
	.sync = sq_vma_sync,
};
/**
 * sq_mmap - mmap() for /dev/cpu/sq
 * @file: unused.
 * @vma: VMA to remap.
 *
 * Remap the specified vma @vma through the store queues, and setup associated
 * information for the new mapping. Also build up the page tables for the new
 * area. Returns 0 on success, a negative errno on failure.
 */
static int sq_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct sq_mapping *map;

	/*
	 * We're not interested in any arbitrary virtual address that has
	 * been stuck in the VMA, as we already know what addresses we
	 * want. Save off the size, and reposition the VMA to begin at
	 * the next available sq address.
	 */
	vma->vm_start = __sq_get_next_addr();
	vma->vm_end = vma->vm_start + size;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_flags |= VM_IO | VM_RESERVED;

	map = __sq_alloc_mapping(vma->vm_start, offset, size, "Userspace");

	/*
	 * __sq_alloc_mapping() returns ERR_PTR() on failure; the previous
	 * code dereferenced the result unconditionally.
	 */
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (io_remap_pfn_range(vma, map->sq_addr, map->addr >> PAGE_SHIFT,
			       size, vma->vm_page_prot))
		return -EAGAIN;

	vma->vm_ops = &sq_vma_ops;

	return 0;
}
#ifdef CONFIG_PROC_FS
/*
 * /proc/sq_mapping read handler: emits one "start-end [phys]: name"
 * line per active mapping. The list is walked tail-first so the output
 * comes out in ascending SQ-address order.
 */
static int sq_mapping_read_proc(char *buf, char **start, off_t off,
				int len, int *eof, void *data)
{
	struct list_head *pos;
	char *out = buf;

	list_for_each_prev(pos, &sq_mapping_list) {
		struct sq_mapping *entry;

		entry = list_entry(pos, typeof(*entry), list);

		out += sprintf(out, "%08lx-%08lx [%08lx]: %s\n",
			       entry->sq_addr,
			       entry->sq_addr + entry->size - 1,
			       entry->addr, entry->name);
	}

	return out - buf;
}
#endif
/* Character device ops for the misc device: only mmap() is supported. */
static struct file_operations sq_fops = {
	.owner = THIS_MODULE,
	.mmap = sq_mmap,
};
/* Misc device node for the SQ API (devfs path cpu/sq). */
static struct miscdevice sq_dev = {
	.minor = STORE_QUEUE_MINOR,
	.name = "sq",
	.devfs_name = "cpu/sq",
	.fops = &sq_fops,
};
/*
 * sq_api_init - module init: register /proc/sq_mapping and the misc device
 *
 * Returns the misc_register() result. On registration failure the proc
 * entry is removed again, so a failed init leaves no state behind
 * (previously the proc entry leaked on this path).
 */
static int __init sq_api_init(void)
{
	int ret;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sq_mapping", 0, 0, sq_mapping_read_proc, 0);
#endif

	ret = misc_register(&sq_dev);
#ifdef CONFIG_PROC_FS
	if (ret)
		remove_proc_entry("sq_mapping", NULL);
#endif
	return ret;
}
/*
 * sq_api_exit - module exit: undo everything sq_api_init() set up
 *
 * Previously the /proc/sq_mapping entry was never removed, leaving a
 * dangling proc entry (pointing at unloaded code) after module unload.
 */
static void __exit sq_api_exit(void)
{
	misc_deregister(&sq_dev);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("sq_mapping", NULL);
#endif
}
module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(STORE_QUEUE_MINOR);

/* Public Store Queue API available to other kernel code. */
EXPORT_SYMBOL(sq_remap);
EXPORT_SYMBOL(sq_unmap);
EXPORT_SYMBOL(sq_clear);
EXPORT_SYMBOL(sq_flush);
EXPORT_SYMBOL(sq_flush_range);

59
arch/sh/kernel/cpu/ubc.S Normal file
View File

@@ -0,0 +1,59 @@
/*
* arch/sh/kernel/ubc.S
*
* Set of management routines for the User Break Controller (UBC)
*
* Copyright (C) 2002 Paul Mundt
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/linkage.h>
#include <asm/ubc.h>
#define STBCR2 0xffc00010
! ubc_sleep: power the User Break Controller down -- zero its break
! registers, then set the MSTP5 module-stop bit in STBCR2. The dummy
! reads after the register writes appear to be required write-completion
! barriers (NOTE(review): confirm against the SH-4 hardware manual).
ENTRY(ubc_sleep)
	mov	#0, r0

	mov.l	1f, r1		! Zero out UBC_BBRA ..
	mov.w	r0, @r1

	mov.l	2f, r1		! .. same for BBRB ..
	mov.w	r0, @r1

	mov.l	3f, r1		! .. and again for BRCR.
	mov.w	r0, @r1

	mov.w	@r1, r0		! Dummy read BRCR

	mov.l	4f, r1		! Set MSTP5 in STBCR2
	mov.b	@r1, r0
	or	#0x01, r0
	mov.b	r0, @r1

	mov.b	@r1, r0		! Two dummy reads ..
	mov.b	@r1, r0

	rts
	 nop
! ubc_wakeup: bring the UBC back up by clearing the MSTP5 module-stop
! bit in STBCR2, again followed by dummy reads.
ENTRY(ubc_wakeup)
	mov.l	4f, r1		! Clear MSTP5
	mov.b	@r1, r0
	and	#0xfe, r0
	mov.b	r0, @r1

	mov.b	@r1, r0		! Two more dummy reads ..
	mov.b	@r1, r0

	rts
	 nop

! Literal pool, shared by ubc_sleep (forward refs 1f-4f) and ubc_wakeup.
1:	.long	UBC_BBRA
2:	.long	UBC_BBRB
3:	.long	UBC_BRCR
4:	.long	STBCR2