More AP / SP bits for the 34K, the Malta bits and things. Still wants a little polishing.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Author: Ralf Baechle
Date: 2005-07-14 15:57:16 +00:00
parent 86071b637d
commit e01402b115
18 changed files with 2332 additions and 139 deletions

arch/mips/kernel/Makefile

@@ -34,12 +34,16 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
obj-$(CONFIG_NO_ISA) += dma-no-isa.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o
obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o

arch/mips/kernel/genex.S

@@ -147,6 +147,38 @@ NESTED(except_vec_ejtag_debug, 0, sp)
__FINIT
/*
* Vectored interrupt handler.
* This prototype is copied to ebase + n*IntCtl.VS and patched
* to invoke the handler
*/
NESTED(except_vec_vi, 0, sp)
SAVE_SOME
SAVE_AT
.set push
.set noreorder
EXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
j except_vec_vi_handler
EXPORT(except_vec_vi_ori)
ori v0, 0 /* Patched */
.set pop
END(except_vec_vi)
EXPORT(except_vec_vi_end)
/*
* Common Vectored Interrupt code
* Complete the register saves and invoke the handler which is passed in $v0
*/
NESTED(except_vec_vi_handler, 0, sp)
SAVE_TEMP
SAVE_STATIC
CLI
move a0, sp
jalr v0
j ret_from_irq
END(except_vec_vi_handler)
/*
* EJTAG debug exception handler.
*/
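
Note on except_vec_vi above: the stub deliberately ends with "lui v0, 0" / "ori v0, 0", and set_vi_srs_handler() (in the traps.c hunk further down) rewrites the two 16-bit immediates so that v0 holds the address of the C handler when the stub runs. Because ori zero-extends, the upper half loaded by lui plus the lower half OR-ed in by ori reassembles the full 32-bit address exactly. A minimal user-space sketch of that patching arithmetic, using the standard MIPS32 encodings of these two instructions; patch_lui_ori and the sample handler address are illustrative, not taken from this commit:

#include <stdint.h>
#include <stdio.h>

/* Rewrite the 16-bit immediates of a "lui v0, 0" / "ori v0, v0, 0" pair
 * so that v0 ends up holding "handler", mirroring what
 * set_vi_srs_handler() does to the copied vector stub. */
static void patch_lui_ori(uint32_t *lui_insn, uint32_t *ori_insn,
                          uint32_t handler)
{
	/* the top 16 bits (opcode + register fields) are preserved */
	*lui_insn = (*lui_insn & 0xffff0000) | ((handler >> 16) & 0xffff);
	*ori_insn = (*ori_insn & 0xffff0000) | (handler & 0xffff);
}

int main(void)
{
	uint32_t lui = 0x3c020000;	/* lui v0, 0     */
	uint32_t ori = 0x34420000;	/* ori v0, v0, 0 */

	patch_lui_ori(&lui, &ori, 0x80401234);
	/* prints 3c028040 34421234, i.e. lui v0, 0x8040; ori v0, v0, 0x1234 */
	printf("%08x %08x\n", lui, ori);
	return 0;
}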

arch/mips/kernel/irq-msc01.c

@@ -74,7 +74,7 @@ static void disable_msc_irq(unsigned int irq)
static void level_mask_and_ack_msc_irq(unsigned int irq)
{
mask_msc_irq(irq);
- if (!cpu_has_ei)
+ if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
}
@@ -84,7 +84,7 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
static void edge_mask_and_ack_msc_irq(unsigned int irq)
{
mask_msc_irq(irq);
- if (!cpu_has_ei)
+ if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
else {
u32 r;
@@ -166,14 +166,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
irq_desc[base+n].handler = &msc_edgeirq_type;
- if (cpu_has_ei)
+ if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
irq_desc[base+n].handler = &msc_levelirq_type;
- if (cpu_has_ei)
+ if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl);

arch/mips/kernel/rtlx.c (new file, 341 lines)

@@ -0,0 +1,341 @@
/*
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/rtlx.h>
#define RTLX_MAJOR 64
#define RTLX_TARG_VPE 1
struct rtlx_info *rtlx;
static int major;
static char module_name[] = "rtlx";
static inline int spacefree(int read, int write, int size);
static struct chan_waitqueues {
wait_queue_head_t rt_queue;
wait_queue_head_t lx_queue;
} channel_wqs[RTLX_CHANNELS];
static struct irqaction irq;
static int irq_num;
extern void *vpe_get_shared(int index);
static void rtlx_dispatch(struct pt_regs *regs)
{
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs);
}
irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
irqreturn_t r = IRQ_HANDLED;
int i;
for (i = 0; i < RTLX_CHANNELS; i++) {
struct rtlx_channel *chan = &rtlx->channel[i];
if (chan->lx_read != chan->lx_write)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
return r;
}
void dump_rtlx(void)
{
int i;
printk("id 0x%lx state %d\n", rtlx->id, rtlx->state);
for (i = 0; i < RTLX_CHANNELS; i++) {
struct rtlx_channel *chan = &rtlx->channel[i];
printk(" rt_state %d lx_state %d buffer_size %d\n",
chan->rt_state, chan->lx_state, chan->buffer_size);
printk(" rt_read %d rt_write %d\n",
chan->rt_read, chan->rt_write);
printk(" lx_read %d lx_write %d\n",
chan->lx_read, chan->lx_write);
printk(" rt_buffer <%s>\n", chan->rt_buffer);
printk(" lx_buffer <%s>\n", chan->lx_buffer);
}
}
/* call when we have the address of the shared structure from the SP side. */
static int rtlx_init(struct rtlx_info *rtlxi)
{
int i;
if (rtlxi->id != RTLX_ID) {
printk(KERN_WARNING "no valid RTLX id at 0x%p\n", rtlxi);
return (-ENOEXEC);
}
/* initialise the wait queues */
for (i = 0; i < RTLX_CHANNELS; i++) {
init_waitqueue_head(&channel_wqs[i].rt_queue);
init_waitqueue_head(&channel_wqs[i].lx_queue);
}
/* set up for interrupt handling */
memset(&irq, 0, sizeof(struct irqaction));
if (cpu_has_vint) {
set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
}
irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
irq.handler = rtlx_interrupt;
irq.flags = SA_INTERRUPT;
irq.name = "RTLX";
irq.dev_id = rtlx;
setup_irq(irq_num, &irq);
rtlx = rtlxi;
return (0);
}
/* only allow one open process at a time to open each channel */
static int rtlx_open(struct inode *inode, struct file *filp)
{
int minor, ret;
struct rtlx_channel *chan;
/* assume only 1 device at the mo. */
minor = MINOR(inode->i_rdev);
if (rtlx == NULL) {
struct rtlx_info **p;
if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
printk(" vpe_get_shared is NULL. Has an SP program been loaded?\n");
return (-EFAULT);
}
if (*p == NULL) {
printk(" vpe_shared %p %p\n", p, *p);
return (-EFAULT);
}
if ((ret = rtlx_init(*p)) < 0)
return (ret);
}
chan = &rtlx->channel[minor];
/* already open? */
if (chan->lx_state == RTLX_STATE_OPENED)
return (-EBUSY);
chan->lx_state = RTLX_STATE_OPENED;
return (0);
}
static int rtlx_release(struct inode *inode, struct file *filp)
{
int minor;
minor = MINOR(inode->i_rdev);
rtlx->channel[minor].lx_state = RTLX_STATE_UNUSED;
return (0);
}
static unsigned int rtlx_poll(struct file *file, poll_table * wait)
{
int minor;
unsigned int mask = 0;
struct rtlx_channel *chan;
minor = MINOR(file->f_dentry->d_inode->i_rdev);
chan = &rtlx->channel[minor];
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
/* data available to read? */
if (chan->lx_read != chan->lx_write)
mask |= POLLIN | POLLRDNORM;
/* space to write */
if (spacefree(chan->rt_read, chan->rt_write, chan->buffer_size))
mask |= POLLOUT | POLLWRNORM;
return (mask);
}
static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count,
loff_t * ppos)
{
size_t fl = 0L;
int minor;
struct rtlx_channel *lx;
DECLARE_WAITQUEUE(wait, current);
minor = MINOR(file->f_dentry->d_inode->i_rdev);
lx = &rtlx->channel[minor];
/* data available? */
if (lx->lx_write == lx->lx_read) {
if (file->f_flags & O_NONBLOCK)
return (0); // -EAGAIN makes cat whinge
/* go to sleep */
add_wait_queue(&channel_wqs[minor].lx_queue, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while (lx->lx_write == lx->lx_read)
schedule();
set_current_state(TASK_RUNNING);
remove_wait_queue(&channel_wqs[minor].lx_queue, &wait);
/* back running */
}
/* find out how much in total */
count = min( count,
(size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size);
/* then how much from the read pointer onwards */
fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl);
/* and if there is anything left at the beginning of the buffer */
if ( count - fl )
copy_to_user (buffer + fl, lx->lx_buffer, count - fl);
/* update the index */
lx->lx_read += count;
lx->lx_read %= lx->buffer_size;
return (count);
}
static inline int spacefree(int read, int write, int size)
{
if (read == write) {
/* never fill the buffer completely, so indexes are always equal if empty
and only empty, or !equal if data available */
return (size - 1);
}
return ((read + size - write) % size) - 1;
}
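
Aside on spacefree() above: this is the usual "leave one slot empty" ring-buffer rule. Because the indexes are only ever equal when the buffer is empty, read == write is unambiguous and at most buffer_size - 1 bytes can be queued. A stand-alone restatement with a few worked values (ring_space is an illustrative name, and the 8-byte buffer is just an example, not anything from this file):

#include <assert.h>

/* Same free-space rule as spacefree(): one slot is always kept unused
 * so that read == write can only mean "empty". */
static int ring_space(int read, int write, int size)
{
	if (read == write)
		return size - 1;
	return ((read + size - write) % size) - 1;
}

int main(void)
{
	assert(ring_space(0, 0, 8) == 7);	/* empty: 7 of 8 bytes usable */
	assert(ring_space(0, 3, 8) == 4);	/* writer 3 ahead: 4 left     */
	assert(ring_space(6, 2, 8) == 3);	/* writer has wrapped: 3 left */
	return 0;
}
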
static ssize_t rtlx_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
int minor;
struct rtlx_channel *rt;
size_t fl;
DECLARE_WAITQUEUE(wait, current);
minor = MINOR(file->f_dentry->d_inode->i_rdev);
rt = &rtlx->channel[minor];
/* any space left... */
if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) {
if (file->f_flags & O_NONBLOCK)
return (-EAGAIN);
add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size))
schedule();
set_current_state(TASK_RUNNING);
remove_wait_queue(&channel_wqs[minor].rt_queue, &wait);
}
/* total number of bytes to copy */
count = min( count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) );
/* first bit from write pointer to the end of the buffer, or count */
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl);
/* if there's any left copy to the beginning of the buffer */
if( count - fl )
copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
rt->rt_write += count;
rt->rt_write %= rt->buffer_size;
return(count);
}
static struct file_operations rtlx_fops = {
.owner = THIS_MODULE,
.open = rtlx_open,
.release = rtlx_release,
.write = rtlx_write,
.read = rtlx_read,
.poll = rtlx_poll
};
static int rtlx_module_init(void)
{
if ((major = register_chrdev(RTLX_MAJOR, module_name, &rtlx_fops)) < 0) {
printk("rtlx_module_init: unable to register device\n");
return (-EBUSY);
}
if (major == 0)
major = RTLX_MAJOR;
return (0);
}
static void rtlx_module_exit(void)
{
unregister_chrdev(major, module_name);
}
module_init(rtlx_module_init);
module_exit(rtlx_module_exit);
MODULE_DESCRIPTION("MIPS RTLX");
MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc");
MODULE_LICENSE("GPL");
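
Aside on how the device is meant to be used from the Linux (AP) side: the driver registers major 64 under the name "rtlx" with one minor per channel, and read()/write()/poll() move bytes through the ring buffers shared with the SP program obtained via vpe_get_shared(). A user-space sketch, assuming a node has been created by hand with something like "mknod /dev/rtlx0 c 64 0"; the node name is an assumption, nothing in this commit creates it:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

/* Drain one RTLX channel to stdout.  open() fails with errno == EBUSY
 * if another process already holds the channel. */
int main(void)
{
	char buf[128];
	struct pollfd pfd;
	int fd = open("/dev/rtlx0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rtlx0");
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* wait for the SP program to write, then read what is there */
	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		fwrite(buf, 1, n, stdout);
	}

	close(fd);
	return 0;
}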

arch/mips/kernel/traps.c

@@ -20,6 +20,7 @@
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -64,6 +65,9 @@ extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp,
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
/*
* These constant is for searching for possible module text segments.
@@ -813,6 +817,12 @@ asmlinkage void do_reserved(struct pt_regs *regs)
(regs->cp0_cause & 0x7f) >> 2);
}
asmlinkage void do_default_vi(struct pt_regs *regs)
{
show_regs(regs);
panic("Caught unexpected vectored interrupt.");
}
/*
* Some MIPS CPUs can enable/disable for cache parity detection, but do
* it different ways.
@@ -921,7 +931,11 @@ void nmi_exception_handler(struct pt_regs *regs)
while(1) ;
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
/*
* As a side effect of the way this is implemented we're limited
@@ -935,13 +949,156 @@ void *set_except_vector(int n, void *addr)
exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
- *(volatile u32 *)(CAC_BASE + 0x200) = 0x08000000 |
+ *(volatile u32 *)(ebase + 0x200) = 0x08000000 |
(0x03ffffff & (handler >> 2));
- flush_icache_range(CAC_BASE + 0x200, CAC_BASE + 0x204);
+ flush_icache_range(ebase + 0x200, ebase + 0x204);
}
return (void *)old_handler;
}
#ifdef CONFIG_CPU_MIPSR2
/*
* Shadow register allocation
* FIXME: SMP...
*/
/* MIPSR2 shadow register sets */
struct shadow_registers {
spinlock_t sr_lock; /* */
int sr_supported; /* Number of shadow register sets supported */
int sr_allocated; /* Bitmap of allocated shadow registers */
} shadow_registers;
void mips_srs_init(void)
{
#ifdef CONFIG_CPU_MIPSR2_SRS
shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
printk ("%d MIPSR2 register sets available\n", shadow_registers.sr_supported);
#else
shadow_registers.sr_supported = 1;
#endif
shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */
spin_lock_init(&shadow_registers.sr_lock);
}
int mips_srs_max(void)
{
return shadow_registers.sr_supported;
}
int mips_srs_alloc (void)
{
struct shadow_registers *sr = &shadow_registers;
unsigned long flags;
int set;
spin_lock_irqsave(&sr->sr_lock, flags);
for (set = 0; set < sr->sr_supported; set++) {
if ((sr->sr_allocated & (1 << set)) == 0) {
sr->sr_allocated |= 1 << set;
spin_unlock_irqrestore(&sr->sr_lock, flags);
return set;
}
}
/* None available */
spin_unlock_irqrestore(&sr->sr_lock, flags);
return -1;
}
void mips_srs_free (int set)
{
struct shadow_registers *sr = &shadow_registers;
unsigned long flags;
spin_lock_irqsave(&sr->sr_lock, flags);
sr->sr_allocated &= ~(1 << set);
spin_unlock_irqrestore(&sr->sr_lock, flags);
}
void *set_vi_srs_handler (int n, void *addr, int srs)
{
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
u32 *w;
unsigned char *b;
if (!cpu_has_veic && !cpu_has_vint)
BUG();
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
srs = 0;
}
else
handler = (unsigned long) addr;
vi_handlers[n] = (unsigned long) addr;
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
if (srs >= mips_srs_max())
panic("Shadow register set %d not supported", srs);
if (cpu_has_veic) {
if (board_bind_eic_interrupt)
board_bind_eic_interrupt (n, srs);
}
else if (cpu_has_vint) {
/* SRSMap is only defined if shadow sets are implemented */
if (mips_srs_max() > 1)
change_c0_srsmap (0xf << n*4, srs << n*4);
}
if (srs == 0) {
/*
* If no shadow set is selected then use the default handler
* that does normal register saving and a standard interrupt exit
*/
extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
const int handler_len = &except_vec_vi_end - &except_vec_vi;
const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
if (handler_len > VECTORSPACING) {
/*
* Sigh... panicing won't help as the console
* is probably not configured :(
*/
panic ("VECTORSPACING too small");
}
memcpy (b, &except_vec_vi, handler_len);
w = (u32 *)(b + lui_offset);
*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
w = (u32 *)(b + ori_offset);
*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
}
else {
/*
* In other cases jump directly to the interrupt handler
*
* It is the handlers responsibility to save registers if required
* (eg hi/lo) and return from the exception using "eret"
*/
w = (u32 *)b;
*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
*w = 0;
flush_icache_range((unsigned long)b, (unsigned long)(b+8));
}
return (void *)old_handler;
}
void *set_vi_handler (int n, void *addr)
{
return set_vi_srs_handler (n, addr, 0);
}
#endif
/*
* This is used by native signal handling
*/
@@ -1016,10 +1173,18 @@ void __init per_cpu_trap_init(void)
if (cpu_has_dsp)
set_c0_status(ST0_MX);
#ifdef CONFIG_CPU_MIPSR2
write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
#endif
/*
- * Some MIPS CPUs have a dedicated interrupt vector which reduces the
- * interrupt processing overhead. Use it where available.
+ * Interrupt handling.
*/
if (cpu_has_veic || cpu_has_vint) {
write_c0_ebase (ebase);
/* Setting vector spacing enables EI/VI mode */
change_c0_intctl (0x3e0, VECTORSPACING);
}
if (cpu_has_divec)
set_c0_cause(CAUSEF_IV);
@@ -1035,13 +1200,41 @@ void __init per_cpu_trap_init(void)
tlb_init();
}
/* Install CPU exception handler */
void __init set_handler (unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
flush_icache_range(ebase + offset, ebase + offset + size);
}
/* Install uncached CPU exception handler */
void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_32BIT
unsigned long uncached_ebase = KSEG1ADDR(ebase);
#endif
#ifdef CONFIG_64BIT
unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif
memcpy((void *)(uncached_ebase + offset), addr, size);
}
void __init trap_init(void)
{
extern char except_vec3_generic, except_vec3_r4000;
extern char except_vec_ejtag_debug;
extern char except_vec4;
unsigned long i;
if (cpu_has_veic || cpu_has_vint)
ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);
else
ebase = CAC_BASE;
#ifdef CONFIG_CPU_MIPSR2
mips_srs_init();
#endif
per_cpu_trap_init();
/*
@@ -1049,7 +1242,7 @@ void __init trap_init(void)
* This will be overriden later as suitable for a particular
* configuration.
*/
- memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
+ set_handler(0x180, &except_vec3_generic, 0x80);
/*
* Setup default vectors
@@ -1061,8 +1254,8 @@ void __init trap_init(void)
* Copy the EJTAG debug exception vector handler code to it's final
* destination.
*/
- if (cpu_has_ejtag)
- memcpy((void *)(CAC_BASE + 0x300), &except_vec_ejtag_debug, 0x80);
+ if (cpu_has_ejtag && board_ejtag_handler_setup)
+ board_ejtag_handler_setup ();
/*
* Only some CPUs have the watch exceptions.
@@ -1071,11 +1264,15 @@ void __init trap_init(void)
set_except_vector(23, handle_watch);
/*
- * Some MIPS CPUs have a dedicated interrupt vector which reduces the
- * interrupt processing overhead. Use it where available.
+ * Initialise interrupt handlers
*/
- if (cpu_has_divec)
- memcpy((void *)(CAC_BASE + 0x200), &except_vec4, 0x8);
+ if (cpu_has_veic || cpu_has_vint) {
+ int nvec = cpu_has_veic ? 64 : 8;
+ for (i = 0; i < nvec; i++)
+ set_vi_handler (i, NULL);
+ }
+ else if (cpu_has_divec)
+ set_handler(0x200, &except_vec4, 0x8);
/*
* Some CPUs can enable/disable for cache parity detection, but does
@@ -1122,6 +1319,10 @@ void __init trap_init(void)
//set_except_vector(15, handle_ndc);
}
if (board_nmi_handler_setup)
board_nmi_handler_setup();
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(15, handle_fpe);
@@ -1146,5 +1347,5 @@ void __init trap_init(void)
signal32_init();
#endif
- flush_icache_range(CAC_BASE, CAC_BASE + 0x400);
+ flush_icache_range(ebase, ebase + 0x400);
}
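
Aside on the new traps.c interfaces: mips_srs_alloc()/mips_srs_free() hand out MIPSR2 shadow register sets from a bitmap (set 0 stays reserved for the kernel), and set_vi_srs_handler(n, addr, srs) plants either the register-saving stub from genex.S (srs == 0) or a direct jump to addr (srs != 0) at ebase + 0x200 + n*VECTORSPACING. A hypothetical platform-side caller, not part of this commit, could combine them as below; fast_dispatch and slow_dispatch are made-up names, and with a private register set the handler has to save whatever it needs and return with eret, as the comment in set_vi_srs_handler() notes:

/* Hypothetical kernel-context fragment, shown only to illustrate the
 * calling convention of the new API (it is not part of this commit). */
extern void fast_dispatch(void);			/* asm routine, ends in eret */
extern void slow_dispatch(struct pt_regs *regs);	/* ordinary C handler */

static int __init example_bind_vi(int vec)
{
	int srs = mips_srs_alloc();	/* -1 when every set is taken */

	if (srs > 0) {
		/* vector "vec" jumps straight to fast_dispatch, running
		 * in shadow register set "srs" */
		set_vi_srs_handler(vec, fast_dispatch, srs);
		return srs;
	}

	/* fall back to set 0: the common stub in genex.S saves the
	 * registers and calls slow_dispatch(regs) */
	set_vi_handler(vec, slow_dispatch);
	return 0;
}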

arch/mips/kernel/vpe.c (new file, 1295 lines)

File diff suppressed because it is too large.